chore: merge v0.11.0 release
commit d74e746a7f
70 changed files with 1910 additions and 914 deletions
.config/nextest.toml | 3 (new file)
@@ -0,0 +1,3 @@
+[profile.default]
+failure-output = "immediate-final"
+fail-fast = false
.github/workflows/build.yml | 25 (new file, vendored)
@@ -0,0 +1,25 @@
+# Runs build related jobs.
+
+name: build
+
+on:
+  push:
+    branches: [main, next]
+  pull_request:
+    types: [opened, reopened, synchronize]
+
+jobs:
+  no-std:
+    name: Build for no-std
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        toolchain: [stable, nightly]
+    steps:
+      - uses: actions/checkout@main
+      - name: Build for no-std
+        run: |
+          rustup update --no-self-update ${{ matrix.toolchain }}
+          rustup target add wasm32-unknown-unknown
+          make build-no-std
.github/workflows/changelog.yml | 23 (new file, vendored)
@@ -0,0 +1,23 @@
+# Runs changelog related jobs.
+# CI job heavily inspired by: https://github.com/tarides/changelog-check-action
+
+name: changelog
+
+on:
+  pull_request:
+    types: [opened, reopened, synchronize, labeled, unlabeled]
+
+jobs:
+  changelog:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@main
+        with:
+          fetch-depth: 0
+      - name: Check for changes in changelog
+        env:
+          BASE_REF: ${{ github.event.pull_request.base.ref }}
+          NO_CHANGELOG_LABEL: ${{ contains(github.event.pull_request.labels.*.name, 'no changelog') }}
+        run: ./scripts/check-changelog.sh "${{ inputs.changelog }}"
+        shell: bash
.github/workflows/doc.yml | 31 (deleted, vendored)
@@ -1,31 +0,0 @@
-# Runs documentation related jobs.
-
-name: doc
-
-on:
-  push:
-    branches:
-      - main
-  pull_request:
-    types: [opened, reopened, synchronize]
-
-jobs:
-  docs:
-    name: Verify the docs on ${{matrix.toolchain}}
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        toolchain: [stable]
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - name: Install rust
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{matrix.toolchain}}
-          override: true
-      - uses: davidB/rust-cargo-make@v1
-      - name: cargo make - doc
-        run: cargo make doc
.github/workflows/lint.yml | 81 (vendored)
@@ -4,63 +4,50 @@ name: lint
 
 on:
   push:
-    branches:
-      - main
+    branches: [main, next]
   pull_request:
     types: [opened, reopened, synchronize]
 
 jobs:
+  clippy:
+    name: clippy nightly on ubuntu-latest
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@main
+      - name: Clippy
+        run: |
+          rustup update --no-self-update nightly
+          rustup +nightly component add clippy
+          make clippy
+
+  rustfmt:
+    name: rustfmt check nightly on ubuntu-latest
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@main
+      - name: Rustfmt
+        run: |
+          rustup update --no-self-update nightly
+          rustup +nightly component add rustfmt
+          make format-check
+
+  doc:
+    name: doc stable on ubuntu-latest
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@main
+      - name: Build docs
+        run: |
+          rustup update --no-self-update
+          make doc
+
   version:
     name: check rust version consistency
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@main
        with:
          profile: minimal
          override: true
      - name: check rust versions
        run: ./scripts/check-rust-version.sh
-
-  rustfmt:
-    name: rustfmt ${{matrix.toolchain}} on ${{matrix.os}}
-    runs-on: ${{matrix.os}}-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        toolchain: [nightly]
-        os: [ubuntu]
-    steps:
-      - uses: actions/checkout@v4
-      - name: Install minimal Rust with rustfmt
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: ${{matrix.toolchain}}
-          components: rustfmt
-          override: true
-      - uses: davidB/rust-cargo-make@v1
-      - name: cargo make - format-check
-        run: cargo make format-check
-
-  clippy:
-    name: clippy ${{matrix.toolchain}} on ${{matrix.os}}
-    runs-on: ${{matrix.os}}-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        toolchain: [stable]
-        os: [ubuntu]
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - name: Install minimal Rust with clippy
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: ${{matrix.toolchain}}
-          components: clippy
-          override: true
-      - uses: davidB/rust-cargo-make@v1
-      - name: cargo make - clippy
-        run: cargo make clippy
.github/workflows/no-std.yml | 32 (deleted, vendored)
@@ -1,32 +0,0 @@
-# Runs no-std related jobs.
-
-name: no-std
-
-on:
-  push:
-    branches:
-      - main
-  pull_request:
-    types: [opened, reopened, synchronize]
-
-jobs:
-  no-std:
-    name: build ${{matrix.toolchain}} no-std for wasm32-unknown-unknown
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        toolchain: [stable, nightly]
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - name: Install rust
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{matrix.toolchain}}
-          override: true
-      - run: rustup target add wasm32-unknown-unknown
-      - uses: davidB/rust-cargo-make@v1
-      - name: cargo make - build-no-std
-        run: cargo make build-no-std
.github/workflows/test.yml | 26 (vendored)
@@ -1,34 +1,28 @@
-# Runs testing related jobs
+# Runs test related jobs.
 
 name: test
 
 on:
   push:
-    branches:
-      - main
+    branches: [main, next]
   pull_request:
     types: [opened, reopened, synchronize]
 
 jobs:
   test:
-    name: test ${{matrix.toolchain}} on ${{matrix.os}} with ${{matrix.features}}
+    name: test ${{matrix.toolchain}} on ${{matrix.os}} with ${{matrix.args}}
     runs-on: ${{matrix.os}}-latest
     strategy:
      fail-fast: false
      matrix:
        toolchain: [stable, nightly]
        os: [ubuntu]
-        features: ["test", "test-no-default-features"]
+        args: [default, no-std]
    timeout-minutes: 30
    steps:
-      - uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - name: Install rust
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{matrix.toolchain}}
-          override: true
-      - uses: davidB/rust-cargo-make@v1
-      - name: cargo make - test
-        run: cargo make ${{matrix.features}}
+      - uses: actions/checkout@main
+      - uses: taiki-e/install-action@nextest
+      - name: Perform tests
+        run: |
+          rustup update --no-self-update ${{matrix.toolchain}}
+          make test-${{matrix.args}}
.pre-commit-config.yaml (filename inferred from content)

@@ -1,43 +1,34 @@
 # See https://pre-commit.com for more information
 # See https://pre-commit.com/hooks.html for more hooks
 repos:
-  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v3.2.0
-    hooks:
-      - id: trailing-whitespace
-      - id: end-of-file-fixer
-      - id: check-yaml
-      - id: check-json
-      - id: check-toml
-      - id: pretty-format-json
-      - id: check-added-large-files
-      - id: check-case-conflict
-      - id: check-executables-have-shebangs
-      - id: check-merge-conflict
-      - id: detect-private-key
-  - repo: https://github.com/hackaugusto/pre-commit-cargo
-    rev: v1.0.0
-    hooks:
-      # Allows cargo fmt to modify the source code prior to the commit
-      - id: cargo
-        name: Cargo fmt
-        args: ["+stable", "fmt", "--all"]
-        stages: [commit]
-      # Requires code to be properly formatted prior to pushing upstream
-      - id: cargo
-        name: Cargo fmt --check
-        args: ["+stable", "fmt", "--all", "--check"]
-        stages: [push, manual]
-      - id: cargo
-        name: Cargo check --all-targets
-        args: ["+stable", "check", "--all-targets"]
-      - id: cargo
-        name: Cargo check --all-targets --no-default-features
-        args: ["+stable", "check", "--all-targets", "--no-default-features"]
-      - id: cargo
-        name: Cargo check --all-targets --features default,std,serde
-        args: ["+stable", "check", "--all-targets", "--features", "default,std,serde"]
-      # Unlike fmt, clippy will not be automatically applied
-      - id: cargo
-        name: Cargo clippy
-        args: ["+nightly", "clippy", "--workspace", "--", "--deny", "clippy::all", "--deny", "warnings"]
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.6.0
+    hooks:
+      - id: trailing-whitespace
+      - id: end-of-file-fixer
+      - id: check-yaml
+      - id: check-json
+      - id: check-toml
+      - id: pretty-format-json
+      - id: check-added-large-files
+      - id: check-case-conflict
+      - id: check-executables-have-shebangs
+      - id: check-merge-conflict
+      - id: detect-private-key
+  - repo: local
+    hooks:
+      - id: lint
+        name: Make lint
+        stages: [commit]
+        language: rust
+        entry: make lint
+      - id: doc
+        name: Make doc
+        stages: [commit]
+        language: rust
+        entry: make doc
+      - id: check
+        name: Make check
+        stages: [commit]
+        language: rust
+        entry: make check
CHANGELOG.md | 100
@@ -1,96 +1,106 @@
+## 0.11.0 (2024-10-17)
+
+- [BREAKING]: renamed `Mmr::open()` into `Mmr::open_at()` and `Mmr::peaks()` into `Mmr::peaks_at()` (#234).
+- Added `Mmr::open()` and `Mmr::peaks()` which rely on `Mmr::open_at()` and `Mmr::peaks()` respectively (#234).
+- Standardized CI and Makefile across Miden repos (#323).
+- Added `Smt::compute_mutations()` and `Smt::apply_mutations()` for validation-checked insertions (#327).
+- Changed padding rule for RPO/RPX hash functions (#318).
+- [BREAKING] Changed return value of the `Mmr::verify()` and `MerklePath::verify()` from `bool` to `Result<>` (#335).
+- Added `is_empty()` functions to the `SimpleSmt` and `Smt` structures. Added `EMPTY_ROOT` constant to the `SparseMerkleTree` trait (#337).
+
 ## 0.10.3 (2024-09-25)
 
-* Implement `get_size_hint` for `Smt` (#331).
+- Implement `get_size_hint` for `Smt` (#331).
 
 ## 0.10.2 (2024-09-25)
 
-* Implement `get_size_hint` for `RpoDigest` and `RpxDigest` and expose constants for their serialized size (#330).
+- Implement `get_size_hint` for `RpoDigest` and `RpxDigest` and expose constants for their serialized size (#330).
 
 ## 0.10.1 (2024-09-13)
 
-* Added `Serializable` and `Deserializable` implementations for `PartialMmr` and `InOrderIndex` (#329).
+- Added `Serializable` and `Deserializable` implementations for `PartialMmr` and `InOrderIndex` (#329).
 
 ## 0.10.0 (2024-08-06)
 
-* Added more `RpoDigest` and `RpxDigest` conversions (#311).
-* [BREAKING] Migrated to Winterfell v0.9 (#315).
-* Fixed encoding of Falcon secret key (#319).
+- Added more `RpoDigest` and `RpxDigest` conversions (#311).
+- [BREAKING] Migrated to Winterfell v0.9 (#315).
+- Fixed encoding of Falcon secret key (#319).
 
 ## 0.9.3 (2024-04-24)
 
-* Added `RpxRandomCoin` struct (#307).
+- Added `RpxRandomCoin` struct (#307).
 
 ## 0.9.2 (2024-04-21)
 
-* Implemented serialization for the `Smt` struct (#304).
-* Fixed a bug in Falcon signature generation (#305).
+- Implemented serialization for the `Smt` struct (#304).
+- Fixed a bug in Falcon signature generation (#305).
 
 ## 0.9.1 (2024-04-02)
 
-* Added `num_leaves()` method to `SimpleSmt` (#302).
+- Added `num_leaves()` method to `SimpleSmt` (#302).
 
 ## 0.9.0 (2024-03-24)
 
-* [BREAKING] Removed deprecated re-exports from liballoc/libstd (#290).
-* [BREAKING] Refactored RpoFalcon512 signature to work with pure Rust (#285).
-* [BREAKING] Added `RngCore` as supertrait for `FeltRng` (#299).
+- [BREAKING] Removed deprecated re-exports from liballoc/libstd (#290).
+- [BREAKING] Refactored RpoFalcon512 signature to work with pure Rust (#285).
+- [BREAKING] Added `RngCore` as supertrait for `FeltRng` (#299).
 
 # 0.8.4 (2024-03-17)
 
-* Re-added unintentionally removed re-exported liballoc macros (`vec` and `format` macros).
+- Re-added unintentionally removed re-exported liballoc macros (`vec` and `format` macros).
 
 # 0.8.3 (2024-03-17)
 
-* Re-added unintentionally removed re-exported liballoc macros (#292).
+- Re-added unintentionally removed re-exported liballoc macros (#292).
 
 # 0.8.2 (2024-03-17)
 
-* Updated `no-std` approach to be in sync with winterfell v0.8.3 release (#290).
+- Updated `no-std` approach to be in sync with winterfell v0.8.3 release (#290).
 
 ## 0.8.1 (2024-02-21)
 
-* Fixed clippy warnings (#280)
+- Fixed clippy warnings (#280)
 
 ## 0.8.0 (2024-02-14)
 
-* Implemented the `PartialMmr` data structure (#195).
-* Implemented RPX hash function (#201).
-* Added `FeltRng` and `RpoRandomCoin` (#237).
-* Accelerated RPO/RPX hash functions using AVX512 instructions (#234).
-* Added `inner_nodes()` method to `PartialMmr` (#238).
-* Improved `PartialMmr::apply_delta()` (#242).
-* Refactored `SimpleSmt` struct (#245).
-* Replaced `TieredSmt` struct with `Smt` struct (#254, #277).
-* Updated Winterfell dependency to v0.8 (#275).
+- Implemented the `PartialMmr` data structure (#195).
+- Implemented RPX hash function (#201).
+- Added `FeltRng` and `RpoRandomCoin` (#237).
+- Accelerated RPO/RPX hash functions using AVX512 instructions (#234).
+- Added `inner_nodes()` method to `PartialMmr` (#238).
+- Improved `PartialMmr::apply_delta()` (#242).
+- Refactored `SimpleSmt` struct (#245).
+- Replaced `TieredSmt` struct with `Smt` struct (#254, #277).
+- Updated Winterfell dependency to v0.8 (#275).
 
 ## 0.7.1 (2023-10-10)
 
-* Fixed RPO Falcon signature build on Windows.
+- Fixed RPO Falcon signature build on Windows.
 
 ## 0.7.0 (2023-10-05)
 
-* Replaced `MerklePathSet` with `PartialMerkleTree` (#165).
-* Implemented clearing of nodes in `TieredSmt` (#173).
-* Added ability to generate inclusion proofs for `TieredSmt` (#174).
-* Implemented Falcon DSA (#179).
-* Added conditional `serde` support for various structs (#180).
-* Implemented benchmarking for `TieredSmt` (#182).
-* Added more leaf traversal methods for `MerkleStore` (#185).
-* Added SVE acceleration for RPO hash function (#189).
+- Replaced `MerklePathSet` with `PartialMerkleTree` (#165).
+- Implemented clearing of nodes in `TieredSmt` (#173).
+- Added ability to generate inclusion proofs for `TieredSmt` (#174).
+- Implemented Falcon DSA (#179).
+- Added conditional `serde` support for various structs (#180).
+- Implemented benchmarking for `TieredSmt` (#182).
+- Added more leaf traversal methods for `MerkleStore` (#185).
+- Added SVE acceleration for RPO hash function (#189).
 
 ## 0.6.0 (2023-06-25)
 
-* [BREAKING] Added support for recording capabilities for `MerkleStore` (#162).
-* [BREAKING] Refactored Merkle struct APIs to use `RpoDigest` instead of `Word` (#157).
-* Added initial implementation of `PartialMerkleTree` (#156).
+- [BREAKING] Added support for recording capabilities for `MerkleStore` (#162).
+- [BREAKING] Refactored Merkle struct APIs to use `RpoDigest` instead of `Word` (#157).
+- Added initial implementation of `PartialMerkleTree` (#156).
 
 ## 0.5.0 (2023-05-26)
 
-* Implemented `TieredSmt` (#152, #153).
-* Implemented ability to extract a subset of a `MerkleStore` (#151).
-* Cleaned up `SimpleSmt` interface (#149).
-* Decoupled hashing and padding of peaks in `Mmr` (#148).
-* Added `inner_nodes()` to `MerkleStore` (#146).
+- Implemented `TieredSmt` (#152, #153).
+- Implemented ability to extract a subset of a `MerkleStore` (#151).
+- Cleaned up `SimpleSmt` interface (#149).
+- Decoupled hashing and padding of peaks in `Mmr` (#148).
+- Added `inner_nodes()` to `MerkleStore` (#146).
 
 ## 0.4.0 (2023-04-21)
 
@@ -138,6 +148,6 @@
 
 - Initial release on crates.io containing the cryptographic primitives used in Miden VM and the Miden Rollup.
 - Hash module with the BLAKE3 and Rescue Prime Optimized hash functions.
-    - BLAKE3 is implemented with 256-bit, 192-bit, or 160-bit output.
-    - RPO is implemented with 256-bit output.
+  - BLAKE3 is implemented with 256-bit, 192-bit, or 160-bit output.
+  - RPO is implemented with 256-bit output.
 - Merkle module, with a set of data structures related to Merkle trees, implemented using the RPO hash function.
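As a rough usage sketch of the new 0.11.0 `Smt` mutation API named in #327 above — the method names come from the changelog entry, but the key/value parameter types and the error handling are assumptions, not verified signatures:

```rust
use miden_crypto::{hash::rpo::RpoDigest, merkle::Smt, Felt, Word, ZERO};

fn main() {
    let mut smt = Smt::new();
    let key = RpoDigest::new([Felt::new(1), ZERO, ZERO, ZERO]);
    let value: Word = [Felt::new(42), ZERO, ZERO, ZERO];

    // Stage a batch of insertions without touching the tree yet...
    let mutations = smt.compute_mutations(vec![(key, value)]);
    // ...then apply them; per #327 application is validation-checked and
    // may fail if the tree no longer matches the computed mutations (assumed).
    smt.apply_mutations(mutations).expect("mutations should match the tree state");
}
```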
Cargo.lock | 82 (generated)
@@ -80,9 +80,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
 
 [[package]]
 name = "autocfg"
-version = "1.3.0"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
+checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
 
 [[package]]
 name = "bit-set"
@@ -147,9 +147,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
 
 [[package]]
 name = "cc"
-version = "1.1.21"
+version = "1.1.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0"
+checksum = "b16803a61b81d9eabb7eae2588776c4c1e584b738ede45fdbb4c972cec1e9945"
 dependencies = [
  "jobserver",
  "libc",
@@ -191,9 +191,9 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "4.5.18"
+version = "4.5.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3"
+checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8"
 dependencies = [
  "clap_builder",
  "clap_derive",
@@ -201,9 +201,9 @@ dependencies = [
 
 [[package]]
 name = "clap_builder"
-version = "4.5.18"
+version = "4.5.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b"
+checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54"
 dependencies = [
  "anstream",
  "anstyle",
@@ -465,9 +465,9 @@ dependencies = [
 
 [[package]]
 name = "js-sys"
-version = "0.3.70"
+version = "0.3.72"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a"
+checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9"
 dependencies = [
  "wasm-bindgen",
 ]
@@ -489,9 +489,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
 
 [[package]]
 name = "libc"
-version = "0.2.159"
+version = "0.2.161"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5"
+checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1"
 
 [[package]]
 name = "libm"
@@ -519,7 +519,7 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
 
 [[package]]
 name = "miden-crypto"
-version = "0.10.3"
+version = "0.11.0"
 dependencies = [
  "blake3",
  "cc",
@@ -619,9 +619,9 @@ dependencies = [
 
 [[package]]
 name = "once_cell"
-version = "1.19.0"
+version = "1.20.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
+checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
 
 [[package]]
 name = "oorandom"
@@ -668,9 +668,9 @@ dependencies = [
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.86"
+version = "1.0.88"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+checksum = "7c3a7fc5db1e57d5a779a352c8cdb57b29aa4c40cc69c3a68a7fedc815fbf2f9"
 dependencies = [
  "unicode-ident",
 ]
@@ -771,9 +771,9 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.10.6"
+version = "1.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
+checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -783,9 +783,9 @@ dependencies = [
 
 [[package]]
 name = "regex-automata"
-version = "0.4.7"
+version = "0.4.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
+checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -794,9 +794,9 @@ dependencies = [
 
 [[package]]
 name = "regex-syntax"
-version = "0.8.4"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
 
 [[package]]
 name = "rustix"
@@ -866,9 +866,9 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.128"
+version = "1.0.129"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8"
+checksum = "6dbcf9b78a125ee667ae19388837dd12294b858d101fdd393cb9d5501ef09eb2"
 dependencies = [
  "itoa",
  "memchr",
@@ -900,9 +900,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
 
 [[package]]
 name = "syn"
-version = "2.0.77"
+version = "2.0.79"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed"
+checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -911,9 +911,9 @@ dependencies = [
 
 [[package]]
 name = "tempfile"
-version = "3.12.0"
+version = "3.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64"
+checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b"
 dependencies = [
  "cfg-if",
  "fastrand",
@@ -989,9 +989,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
 
 [[package]]
 name = "wasm-bindgen"
-version = "0.2.93"
+version = "0.2.95"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5"
+checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e"
 dependencies = [
  "cfg-if",
  "once_cell",
@@ -1000,9 +1000,9 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-backend"
-version = "0.2.93"
+version = "0.2.95"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b"
+checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358"
 dependencies = [
  "bumpalo",
  "log",
@@ -1015,9 +1015,9 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-macro"
-version = "0.2.93"
+version = "0.2.95"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf"
+checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56"
 dependencies = [
  "quote",
  "wasm-bindgen-macro-support",
@@ -1025,9 +1025,9 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-macro-support"
-version = "0.2.93"
+version = "0.2.95"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836"
+checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -1038,15 +1038,15 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-shared"
-version = "0.2.93"
+version = "0.2.95"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484"
+checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d"
 
 [[package]]
 name = "web-sys"
-version = "0.3.70"
+version = "0.3.72"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0"
+checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112"
 dependencies = [
  "js-sys",
  "wasm-bindgen",
Cargo.toml | 10
@@ -1,16 +1,16 @@
 [package]
 name = "miden-crypto"
-version = "0.10.3"
+version = "0.11.0"
 description = "Miden Cryptographic primitives"
 authors = ["miden contributors"]
 readme = "README.md"
 license = "MIT"
 repository = "https://github.com/0xPolygonMiden/crypto"
-documentation = "https://docs.rs/miden-crypto/0.10.3"
+documentation = "https://docs.rs/miden-crypto/0.11.0"
 categories = ["cryptography", "no-std"]
 keywords = ["miden", "crypto", "hash", "merkle"]
 edition = "2021"
-rust-version = "1.80"
+rust-version = "1.82"
 
 [[bin]]
 name = "miden-crypto"
@@ -63,11 +63,11 @@ winter-utils = { version = "0.9", default-features = false }
 criterion = { version = "0.5", features = ["html_reports"] }
 getrandom = { version = "0.2", features = ["js"] }
 hex = { version = "0.4", default-features = false, features = ["alloc"] }
-proptest = "1.4"
+proptest = "1.5"
 rand_chacha = { version = "0.3", default-features = false }
 rand-utils = { version = "0.9", package = "winter-rand-utils" }
 seq-macro = { version = "0.3" }
 
 [build-dependencies]
-cc = { version = "1.0", optional = true, features = ["parallel"] }
+cc = { version = "1.1", optional = true, features = ["parallel"] }
 glob = "0.3"
Makefile | 86 (new file)
@@ -0,0 +1,86 @@
+.DEFAULT_GOAL := help
+
+.PHONY: help
+help:
+	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+# -- variables --------------------------------------------------------------------------------------
+
+WARNINGS=RUSTDOCFLAGS="-D warnings"
+DEBUG_OVERFLOW_INFO=RUSTFLAGS="-C debug-assertions -C overflow-checks -C debuginfo=2"
+
+# -- linting --------------------------------------------------------------------------------------
+
+.PHONY: clippy
+clippy: ## Run Clippy with configs
+	$(WARNINGS) cargo +nightly clippy --workspace --all-targets --all-features
+
+
+.PHONY: fix
+fix: ## Run Fix with configs
+	cargo +nightly fix --allow-staged --allow-dirty --all-targets --all-features
+
+
+.PHONY: format
+format: ## Run Format using nightly toolchain
+	cargo +nightly fmt --all
+
+
+.PHONY: format-check
+format-check: ## Run Format using nightly toolchain but only in check mode
+	cargo +nightly fmt --all --check
+
+
+.PHONY: lint
+lint: format fix clippy ## Run all linting tasks at once (Clippy, fixing, formatting)
+
+# --- docs ----------------------------------------------------------------------------------------
+
+.PHONY: doc
+doc: ## Generate and check documentation
+	$(WARNINGS) cargo doc --all-features --keep-going --release
+
+# --- testing -------------------------------------------------------------------------------------
+
+.PHONY: test-default
+test-default: ## Run tests with default features
+	$(DEBUG_OVERFLOW_INFO) cargo nextest run --profile default --release --all-features
+
+
+.PHONY: test-no-std
+test-no-std: ## Run tests with `no-default-features` (std)
+	$(DEBUG_OVERFLOW_INFO) cargo nextest run --profile default --release --no-default-features
+
+
+.PHONY: test
+test: test-default test-no-std ## Run all tests
+
+# --- checking ------------------------------------------------------------------------------------
+
+.PHONY: check
+check: ## Check all targets and features for errors without code generation
+	cargo check --all-targets --all-features
+
+# --- building ------------------------------------------------------------------------------------
+
+.PHONY: build
+build: ## Build with default features enabled
+	cargo build --release
+
+.PHONY: build-no-std
+build-no-std: ## Build without the standard library
+	cargo build --release --no-default-features --target wasm32-unknown-unknown
+
+.PHONY: build-avx2
+build-avx2: ## Build with avx2 support
+	RUSTFLAGS="-C target-feature=+avx2" cargo build --release
+
+.PHONY: build-sve
+build-sve: ## Build with sve support
+	RUSTFLAGS="-C target-feature=+sve" cargo build --release
+
+# --- benchmarking --------------------------------------------------------------------------------
+
+.PHONY: bench-tx
+bench-tx: ## Run crypto benchmarks
+	cargo bench
Makefile.toml | 86 (deleted; filename inferred from content)

@@ -1,86 +0,0 @@
-# Cargo Makefile
-
-# -- linting --------------------------------------------------------------------------------------
-[tasks.format]
-toolchain = "nightly"
-command = "cargo"
-args = ["fmt", "--all"]
-
-[tasks.format-check]
-toolchain = "nightly"
-command = "cargo"
-args = ["fmt", "--all", "--", "--check"]
-
-[tasks.clippy-default]
-command = "cargo"
-args = ["clippy","--workspace", "--all-targets", "--", "-D", "clippy::all", "-D", "warnings"]
-
-[tasks.clippy-all-features]
-command = "cargo"
-args = ["clippy","--workspace", "--all-targets", "--all-features", "--", "-D", "clippy::all", "-D", "warnings"]
-
-[tasks.clippy]
-dependencies = [
-  "clippy-default",
-  "clippy-all-features"
-]
-
-[tasks.fix]
-description = "Runs Fix"
-command = "cargo"
-toolchain = "nightly"
-args = ["fix", "--allow-staged", "--allow-dirty", "--all-targets", "--all-features"]
-
-[tasks.lint]
-description = "Runs all linting tasks (Clippy, fixing, formatting)"
-run_task = { name = ["format", "format-check", "clippy", "docs"] }
-
-# --- docs ----------------------------------------------------------------------------------------
-[tasks.doc]
-env = { "RUSTDOCFLAGS" = "-D warnings" }
-command = "cargo"
-args = ["doc", "--all-features", "--keep-going", "--release"]
-
-# --- testing -------------------------------------------------------------------------------------
-[tasks.test]
-description = "Run tests with default features"
-env = { "RUSTFLAGS" = "-C debug-assertions -C overflow-checks -C debuginfo=2" }
-workspace = false
-command = "cargo"
-args = ["test", "--release"]
-
-[tasks.test-no-default-features]
-description = "Run tests with no-default-features"
-env = { "RUSTFLAGS" = "-C debug-assertions -C overflow-checks -C debuginfo=2" }
-workspace = false
-command = "cargo"
-args = ["test", "--release", "--no-default-features"]
-
-[tasks.test-all]
-description = "Run all tests"
-workspace = false
-run_task = { name = ["test", "test-no-default-features"], parallel = true }
-
-# --- building ------------------------------------------------------------------------------------
-[tasks.build]
-description = "Build in release mode"
-command = "cargo"
-args = ["build", "--release"]
-
-[tasks.build-no-std]
-description = "Build using no-std"
-command = "cargo"
-args = ["build", "--release", "--no-default-features", "--target", "wasm32-unknown-unknown"]
-
-[tasks.build-avx2]
-description = "Build using AVX2 acceleration"
-env = { "RUSTFLAGS" = "-C target-feature=+avx2" }
-command = "cargo"
-args = ["build", "--release"]
-
-[tasks.build-sve]
-description = "Build with SVE acceleration"
-env = { "RUSTFLAGS" = "-C target-feature=+sve" }
-command = "cargo"
-args = ["build", "--release"]
README.md | 76
@@ -2,85 +2,107 @@
 
 [](https://github.com/0xPolygonMiden/crypto/blob/main/LICENSE)
 [](https://github.com/0xPolygonMiden/crypto/actions/workflows/test.yml)
-[](https://github.com/0xPolygonMiden/crypto/actions/workflows/no-std.yml)
+[](https://github.com/0xPolygonMiden/crypto/actions/workflows/build.yml)
 [](https://www.rust-lang.org/tools/install)
 [](https://crates.io/crates/miden-crypto)
 
 This crate contains cryptographic primitives used in Polygon Miden.
 
 ## Hash
 
 [Hash module](./src/hash) provides a set of cryptographic hash functions which are used by the Miden VM and the Miden rollup. Currently, these functions are:
 
-* [BLAKE3](https://github.com/BLAKE3-team/BLAKE3) hash function with 256-bit, 192-bit, or 160-bit output. The 192-bit and 160-bit outputs are obtained by truncating the 256-bit output of the standard BLAKE3.
-* [RPO](https://eprint.iacr.org/2022/1577) hash function with 256-bit output. This hash function is an algebraic hash function suitable for recursive STARKs.
-* [RPX](https://eprint.iacr.org/2023/1045) hash function with 256-bit output. Similar to RPO, this hash function is suitable for recursive STARKs but it is about 2x faster as compared to RPO.
+- [BLAKE3](https://github.com/BLAKE3-team/BLAKE3) hash function with 256-bit, 192-bit, or 160-bit output. The 192-bit and 160-bit outputs are obtained by truncating the 256-bit output of the standard BLAKE3.
+- [RPO](https://eprint.iacr.org/2022/1577) hash function with 256-bit output. This hash function is an algebraic hash function suitable for recursive STARKs.
+- [RPX](https://eprint.iacr.org/2023/1045) hash function with 256-bit output. Similar to RPO, this hash function is suitable for recursive STARKs but it is about 2x faster as compared to RPO.
 
 For performance benchmarks of these hash functions and their comparison to other popular hash functions please see [here](./benches/).
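As an illustrative sketch of the hash API this section describes — the `hash` and `merge` functions follow the Winterfell hasher traits the crate is assumed to implement; their exact signatures are not shown in this diff:

```rust
use miden_crypto::hash::rpo::Rpo256;

fn main() {
    // Hash an arbitrary byte slice into a 256-bit RPO digest (4 field elements).
    let digest = Rpo256::hash(b"hello world");
    // Merge two digests, as is done when hashing Merkle tree nodes.
    let parent = Rpo256::merge(&[digest, digest]);
    println!("{parent:?}");
}
```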
 
 ## Merkle
 
 [Merkle module](./src/merkle/) provides a set of data structures related to Merkle trees. All these data structures are implemented using the RPO hash function described above. The data structures are:
 
-* `MerkleStore`: a collection of Merkle trees of different heights designed to efficiently store trees with common subtrees. When instantiated with `RecordingMap`, a Merkle store records all accesses to the original data.
-* `MerkleTree`: a regular fully-balanced binary Merkle tree. The depth of this tree can be at most 64.
-* `Mmr`: a Merkle mountain range structure designed to function as an append-only log.
-* `PartialMerkleTree`: a partial view of a Merkle tree where some sub-trees may not be known. This is similar to a collection of Merkle paths all resolving to the same root. The length of the paths can be at most 64.
-* `PartialMmr`: a partial view of a Merkle mountain range structure.
-* `SimpleSmt`: a Sparse Merkle Tree (with no compaction), mapping 64-bit keys to 4-element values.
-* `Smt`: a Sparse Merkle tree (with compaction at depth 64), mapping 4-element keys to 4-element values.
+- `MerkleStore`: a collection of Merkle trees of different heights designed to efficiently store trees with common subtrees. When instantiated with `RecordingMap`, a Merkle store records all accesses to the original data.
+- `MerkleTree`: a regular fully-balanced binary Merkle tree. The depth of this tree can be at most 64.
+- `Mmr`: a Merkle mountain range structure designed to function as an append-only log.
+- `PartialMerkleTree`: a partial view of a Merkle tree where some sub-trees may not be known. This is similar to a collection of Merkle paths all resolving to the same root. The length of the paths can be at most 64.
+- `PartialMmr`: a partial view of a Merkle mountain range structure.
+- `SimpleSmt`: a Sparse Merkle Tree (with no compaction), mapping 64-bit keys to 4-element values.
+- `Smt`: a Sparse Merkle tree (with compaction at depth 64), mapping 4-element keys to 4-element values.
 
 The module also contains additional supporting components such as `NodeIndex`, `MerklePath`, and `MerkleError` to assist with tree indexation, opening proofs, and reporting inconsistent arguments/state.
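A minimal sketch of the `MerkleTree` type listed above — the constructor and accessor names are assumed from the module description, and the exact error and index types may differ:

```rust
use miden_crypto::{merkle::MerkleTree, Felt, Word, ZERO};

fn main() {
    // Four leaves, each a 4-element word; internal nodes are hashed with RPO.
    let leaves: Vec<Word> = (0..4u64).map(|i| [Felt::new(i), ZERO, ZERO, ZERO]).collect();
    let tree = MerkleTree::new(leaves).expect("leaf count must be a power of two");
    println!("root = {:?}", tree.root());
}
```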
 
 ## Signatures
 
 [DSA module](./src/dsa) provides a set of digital signature schemes supported by default in the Miden VM. Currently, these schemes are:
 
-* `RPO Falcon512`: a variant of the [Falcon](https://falcon-sign.info/) signature scheme. This variant differs from the standard in that instead of using SHAKE256 hash function in the *hash-to-point* algorithm we use RPO256. This makes the signature more efficient to verify in Miden VM.
+- `RPO Falcon512`: a variant of the [Falcon](https://falcon-sign.info/) signature scheme. This variant differs from the standard in that instead of using SHAKE256 hash function in the _hash-to-point_ algorithm we use RPO256. This makes the signature more efficient to verify in Miden VM.
 
 For the above signatures, key generation, signing, and signature verification are available for both `std` and `no_std` contexts (see [crate features](#crate-features) below). However, in `no_std` context, the user is responsible for supplying the key generation and signing procedures with a random number generator.
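A hypothetical end-to-end flow for RPO Falcon512, assembled from the test imports visible later in this diff (`SecretKey`, `ChaCha20Rng`, `SeedableRng`); the method names `with_rng`, `sign_with_rng`, `public_key`, and `verify` are assumptions, not verified signatures:

```rust
use miden_crypto::{dsa::rpo_falcon512::SecretKey, Word, ONE, ZERO};
use rand::SeedableRng;
use rand_chacha::ChaCha20Rng;

fn main() {
    // In no_std contexts the caller must supply the RNG, per the text above.
    let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
    let sk = SecretKey::with_rng(&mut rng);
    let message: Word = [ONE, ZERO, ZERO, ZERO];
    let signature = sk.sign_with_rng(message, &mut rng);
    assert!(sk.public_key().verify(message, &signature));
}
```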
 
 ## Pseudo-Random Element Generator
 
 [Pseudo random element generator module](./src/rand/) provides a set of traits and data structures that facilitate generating pseudo-random elements in the context of Miden VM and Miden rollup. The module currently includes:
 
-* `FeltRng`: a trait for generating random field elements and random 4 field elements.
-* `RpoRandomCoin`: a struct implementing `FeltRng` as well as the [`RandomCoin`](https://github.com/facebook/winterfell/blob/main/crypto/src/random/mod.rs) trait using RPO hash function.
-* `RpxRandomCoin`: a struct implementing `FeltRng` as well as the [`RandomCoin`](https://github.com/facebook/winterfell/blob/main/crypto/src/random/mod.rs) trait using RPX hash function.
+- `FeltRng`: a trait for generating random field elements and random 4 field elements.
+- `RpoRandomCoin`: a struct implementing `FeltRng` as well as the [`RandomCoin`](https://github.com/facebook/winterfell/blob/main/crypto/src/random/mod.rs) trait using RPO hash function.
+- `RpxRandomCoin`: a struct implementing `FeltRng` as well as the [`RandomCoin`](https://github.com/facebook/winterfell/blob/main/crypto/src/random/mod.rs) trait using RPX hash function.
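A short sketch of drawing randomness through the `FeltRng` trait just listed — the `new`, `draw_element`, and `draw_word` names are assumed from the module description, not verified signatures:

```rust
use miden_crypto::{rand::{FeltRng, RpoRandomCoin}, Word, ZERO};

fn main() {
    let seed: Word = [ZERO; 4];
    let mut coin = RpoRandomCoin::new(seed);
    let element = coin.draw_element(); // one random field element
    let word: Word = coin.draw_word(); // four random field elements
    println!("{element:?} {word:?}");
}
```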
 
+## Make commands
+
+We use `make` to automate building, testing, and other processes. In most cases, `make` commands are wrappers around `cargo` commands with specific arguments. You can view the list of available commands in the [Makefile](Makefile), or run the following command:
+
+```shell
+make
+```
+
 ## Crate features
 
 This crate can be compiled with the following features:
 
-* `std` - enabled by default and relies on the Rust standard library.
-* `no_std` does not rely on the Rust standard library and enables compilation to WebAssembly.
+- `std` - enabled by default and relies on the Rust standard library.
+- `no_std` does not rely on the Rust standard library and enables compilation to WebAssembly.
 
 Both of these features imply the use of [alloc](https://doc.rust-lang.org/alloc/) to support heap-allocated collections.
 
-To compile with `no_std`, disable default features via `--no-default-features` flag.
+To compile with `no_std`, disable default features via `--no-default-features` flag or using the following command:
+
+```shell
+make build-no-std
+```
 
 ### AVX2 acceleration
 
 On platforms with [AVX2](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions) support, RPO and RPX hash function can be accelerated by using the vector processing unit. To enable AVX2 acceleration, the code needs to be compiled with the `avx2` target feature enabled. For example:
 
 ```shell
-cargo make build-avx2
+make build-avx2
 ```
 
 ### SVE acceleration
 
-On platforms with [SVE](https://en.wikipedia.org/wiki/AArch64#Scalable_Vector_Extension_(SVE)) support, RPO and RPX hash function can be accelerated by using the vector processing unit. To enable SVE acceleration, the code needs to be compiled with the `sve` target feature enabled. For example:
+On platforms with [SVE](<https://en.wikipedia.org/wiki/AArch64#Scalable_Vector_Extension_(SVE)>) support, RPO and RPX hash function can be accelerated by using the vector processing unit. To enable SVE acceleration, the code needs to be compiled with the `sve` target feature enabled. For example:
 
 ```shell
-cargo make build-sve
+make build-sve
 ```
 
 ## Testing
 
-The best way to test the library is using our `Makefile.toml` and [cargo-make](https://github.com/sagiegurari/cargo-make), this will enable you to use our pre-defined optimized testing commands:
+The best way to test the library is using our [Makefile](Makefile), this will enable you to use our pre-defined optimized testing commands:
 
 ```shell
-cargo make test-all
+make test
 ```
 
 For example, some of the functions are heavy and might take a while for the tests to complete if using simply `cargo test`. In order to test in release and optimized mode, we have to replicate the test conditions of the development mode so all debug assertions can be verified.
 
-We do that by enabling some special [flags](https://doc.rust-lang.org/cargo/reference/profiles.html) for the compilation (which we have set as a default in our [Makefile.toml](Makefile.toml)):
+We do that by enabling some special [flags](https://doc.rust-lang.org/cargo/reference/profiles.html) for the compilation (which we have set as a default in our [Makefile](Makefile)):
 
 ```shell
 RUSTFLAGS="-C debug-assertions -C overflow-checks -C debuginfo=2" cargo test --release
 ```
 
 ## License
 
 This project is [MIT licensed](./LICENSE).
rust-toolchain | 1 (deleted; filename inferred from content)

@@ -1 +0,0 @@
-1.80
rust-toolchain.toml | 5 (new file)
@@ -0,0 +1,5 @@
+[toolchain]
+channel = "1.82"
+components = ["rustfmt", "rust-src", "clippy"]
+targets = ["wasm32-unknown-unknown"]
+profile = "minimal"
rustfmt.toml | 24
@@ -2,20 +2,22 @@ edition = "2021"
 array_width = 80
 attr_fn_like_width = 80
 chain_width = 80
-#condense_wildcard_suffixes = true
-#enum_discrim_align_threshold = 40
+comment_width = 100
+condense_wildcard_suffixes = true
 fn_call_width = 80
-#fn_single_line = true
-#format_code_in_doc_comments = true
-#format_macro_matchers = true
-#format_strings = true
-#group_imports = "StdExternalCrate"
-#hex_literal_case = "Lower"
-#imports_granularity = "Crate"
+format_code_in_doc_comments = true
+format_macro_matchers = true
+group_imports = "StdExternalCrate"
+hex_literal_case = "Lower"
+imports_granularity = "Crate"
+match_block_trailing_comma = true
+newline_style = "Unix"
-#normalize_doc_attributes = true
-#reorder_impl_items = true
+reorder_imports = true
+reorder_modules = true
+single_line_if_else_max_width = 60
+single_line_let_else_max_width = 60
 struct_lit_width = 40
 struct_variant_width = 40
 use_field_init_shorthand = true
 use_try_shorthand = true
 wrap_comments = true
scripts/check-changelog.sh | 21 (new executable file)
@@ -0,0 +1,21 @@
+#!/bin/bash
+set -uo pipefail
+
+CHANGELOG_FILE="${1:-CHANGELOG.md}"
+
+if [ "${NO_CHANGELOG_LABEL}" = "true" ]; then
+    # 'no changelog' set, so finish successfully
+    echo "\"no changelog\" label has been set"
+    exit 0
+else
+    # a changelog check is required
+    # fail if the diff is empty
+    if git diff --exit-code "origin/${BASE_REF}" -- "${CHANGELOG_FILE}"; then
+        >&2 echo "Changes should come with an entry in the \"CHANGELOG.md\" file. This behavior
+can be overridden by using the \"no changelog\" label, which is used for changes
+that are trivial / explicitly stated not to require a changelog entry."
+        exit 1
+    fi
+
+    echo "The \"CHANGELOG.md\" file has been updated."
+fi
scripts/check-rust-version.sh (filename inferred from content)

@@ -1,10 +1,12 @@
 #!/bin/bash
 
-# Check rust-toolchain file
-TOOLCHAIN_VERSION=$(cat rust-toolchain)
+# Get rust-toolchain.toml file channel
+TOOLCHAIN_VERSION=$(grep 'channel' rust-toolchain.toml | sed -E 's/.*"(.*)".*/\1/')
 
-# Check workspace Cargo.toml file
-CARGO_VERSION=$(cat Cargo.toml | grep "rust-version" | cut -d '"' -f 2)
+# Get workspace Cargo.toml file rust-version
+CARGO_VERSION=$(grep 'rust-version' Cargo.toml | sed -E 's/.*"(.*)".*/\1/')
 
 # Check version match
 if [ "$CARGO_VERSION" != "$TOOLCHAIN_VERSION" ]; then
     echo "Mismatch in Cargo.toml: Expected $TOOLCHAIN_VERSION, found $CARGO_VERSION"
     exit 1
@@ -1,7 +1,9 @@
-use super::{math::FalconFelt, Nonce, Polynomial, Rpo256, Word, MODULUS, N, ZERO};
 use alloc::vec::Vec;
+
 use num::Zero;
 
+use super::{math::FalconFelt, Nonce, Polynomial, Rpo256, Word, MODULUS, N, ZERO};
+
 // HASH-TO-POINT FUNCTIONS
 // ================================================================================================
 
@@ -15,12 +15,13 @@ pub use secret_key::SecretKey;
 
 #[cfg(test)]
 mod tests {
-    use crate::{dsa::rpo_falcon512::SecretKey, Word, ONE};
     use rand::SeedableRng;
     use rand_chacha::ChaCha20Rng;
     use winter_math::FieldElement;
     use winter_utils::{Deserializable, Serializable};
 
+    use crate::{dsa::rpo_falcon512::SecretKey, Word, ONE};
+
     #[test]
     fn test_falcon_verification() {
         let seed = [0_u8; 32];
@@ -1,13 +1,14 @@
-use crate::dsa::rpo_falcon512::FALCON_ENCODING_BITS;
-use alloc::string::ToString;
-use core::ops::Deref;
-
-use num::Zero;
+use alloc::string::ToString;
+use core::ops::Deref;
+
+use num::Zero;
+
 use super::{
     super::{Rpo256, LOG_N, N, PK_LEN},
     ByteReader, ByteWriter, Deserializable, DeserializationError, FalconFelt, Felt, Polynomial,
     Serializable, Signature, Word,
 };
+use crate::dsa::rpo_falcon512::FALCON_ENCODING_BITS;
 
 // PUBLIC KEY
 // ================================================================================================
@@ -116,7 +117,7 @@ impl Deserializable for PubKeyPoly {
 
         if acc_len >= FALCON_ENCODING_BITS {
             acc_len -= FALCON_ENCODING_BITS;
-            let w = (acc >> acc_len) & 0x3FFF;
+            let w = (acc >> acc_len) & 0x3fff;
             let element = w.try_into().map_err(|err| {
                 DeserializationError::InvalidValue(format!(
                     "Failed to decode public key: {err}"
@@ -1,3 +1,11 @@
+use alloc::{string::ToString, vec::Vec};
+
+use num::Complex;
+#[cfg(not(feature = "std"))]
+use num::Float;
+use num_complex::Complex64;
+use rand::Rng;
+
 use super::{
     super::{
         math::{ffldl, ffsampling, gram, normalize_tree, FalconFelt, FastFft, LdlTree, Polynomial},
@@ -10,13 +18,6 @@ use super::{
 use crate::dsa::rpo_falcon512::{
     hash_to_point::hash_to_point_rpo256, math::ntru_gen, SIG_NONCE_LEN, SK_LEN,
 };
-use alloc::{string::ToString, vec::Vec};
-use num::Complex;
-use num_complex::Complex64;
-use rand::Rng;
-
-#[cfg(not(feature = "std"))]
-use num::Float;
 
 // CONSTANTS
 // ================================================================================================
@@ -27,14 +28,15 @@ const WIDTH_SMALL_POLY_COEFFICIENT: usize = 6;
 // SECRET KEY
 // ================================================================================================
 
-/// The secret key is a quadruple [[g, -f], [G, -F]] of polynomials with integer coefficients.
+/// Represents the secret key for Falcon DSA.
 ///
-/// Each polynomial is of degree at most N = 512 and computations with these polynomials are done
-/// modulo the monic irreducible polynomial ϕ = x^N + 1. The secret key is a basis for a lattice
-/// and has the property of being short with respect to a certain norm and an upper bound
-/// appropriate for a given security parameter. The public key on the other hand is another basis
-/// for the same lattice and can be described by a single polynomial h with integer coefficients
-/// modulo ϕ. The two keys are related by the following relation:
+/// The secret key is a quadruple [[g, -f], [G, -F]] of polynomials with integer coefficients. Each
+/// polynomial is of degree at most N = 512 and computations with these polynomials is done modulo
+/// the monic irreducible polynomial ϕ = x^N + 1. The secret key is a basis for a lattice and has
+/// the property of being short with respect to a certain norm and an upper bound appropriate for
+/// a given security parameter. The public key on the other hand is another basis for the same
+/// lattice and can be described by a single polynomial h with integer coefficients modulo ϕ.
+/// The two keys are related by the following relation:
 ///
 /// 1. h = g /f [mod ϕ][mod p]
 /// 2. f.G - g.F = p [mod ϕ]
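Typeset, the two relations in the doc comment above read (same content, standard notation):

```latex
h \equiv g \cdot f^{-1} \pmod{\phi,\, p}, \qquad
f \cdot G - g \cdot F \equiv p \pmod{\phi}
```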
@@ -1,11 +1,12 @@
-use super::{fft::FastFft, polynomial::Polynomial, samplerz::sampler_z};
 use alloc::boxed::Box;
 
+#[cfg(not(feature = "std"))]
+use num::Float;
 use num::{One, Zero};
 use num_complex::{Complex, Complex64};
 use rand::Rng;
 
-#[cfg(not(feature = "std"))]
-use num::Float;
+use super::{fft::FastFft, polynomial::Polynomial, samplerz::sampler_z};
+
 const SIGMIN: f64 = 1.2778336969128337;
 
@@ -80,11 +81,11 @@ pub fn normalize_tree(tree: &mut LdlTree, sigma: f64) {
         LdlTree::Branch(_ell, left, right) => {
             normalize_tree(left, sigma);
             normalize_tree(right, sigma);
-        }
+        },
         LdlTree::Leaf(vector) => {
             vector[0] = Complex::new(sigma / vector[0].re.sqrt(), 0.0);
             vector[1] = Complex64::zero();
-        }
+        },
     }
 }
 
@@ -110,7 +111,7 @@ pub fn ffsampling<R: Rng>(
             let z0 = Polynomial::<Complex64>::merge_fft(&bold_z0.0, &bold_z0.1);
 
             (z0, z1)
-        }
+        },
         LdlTree::Leaf(value) => {
             let z0 = sampler_z(t.0.coefficients[0].re, value[0].re, SIGMIN, &mut rng);
             let z1 = sampler_z(t.1.coefficients[0].re, value[0].re, SIGMIN, &mut rng);
@@ -118,6 +119,6 @@ pub fn ffsampling<R: Rng>(
                 Polynomial::new(vec![Complex64::new(z0 as f64, 0.0)]),
                 Polynomial::new(vec![Complex64::new(z1 as f64, 0.0)]),
             )
-        }
+        },
     }
 }
@@ -1,14 +1,15 @@
-use super::{field::FalconFelt, polynomial::Polynomial, Inverse};
 use alloc::vec::Vec;
 use core::{
     f64::consts::PI,
     ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign},
 };
-use num::{One, Zero};
-use num_complex::Complex64;
 
 #[cfg(not(feature = "std"))]
 use num::Float;
+use num::{One, Zero};
+use num_complex::Complex64;
+
+use super::{field::FalconFelt, polynomial::Polynomial, Inverse};
 
 /// Implements Cyclotomic FFT without bitreversing the outputs, and using precomputed powers of the
 /// 2n-th primitive root of unity.
@@ -73,7 +74,7 @@ where
         rev
     }
 
-    /// Computes the first n powers of the 2nth root of unity, and put them in bit-reversed order.
+    /// Computes the first n powers of the 2nd root of unity, and put them in bit-reversed order.
     #[allow(dead_code)]
     fn bitreversed_powers(n: usize) -> Vec<Self> {
         let psi = Self::primitive_root_of_unity(2 * n);
@@ -87,7 +88,7 @@ where
         array
     }
 
-    /// Computes the first n powers of the 2nth root of unity, invert them, and put them in
+    /// Computes the first n powers of the 2nd root of unity, invert them, and put them in
     /// bit-reversed order.
     #[allow(dead_code)]
     fn bitreversed_powers_inverse(n: usize) -> Vec<Self> {
@@ -102,7 +103,8 @@ where
         array
     }
 
-    /// Reorders the given elements in the array by reversing the binary expansions of their indices.
+    /// Reorders the given elements in the array by reversing the binary expansions of their
+    /// indices.
    fn bitreverse_array<T>(array: &mut [T]) {
        let n = array.len();
        for i in 0..n {
|
||||
|
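For reference, the bit-reversal permutation described by the doc comment above can be written as a standalone function (a sketch of the technique, not the trait method itself), assuming the slice length is a power of two. For n = 8 it swaps indices 1 and 4 and indices 3 and 6, and fixes the rest:

// Sketch of the bit-reversal permutation described above (illustrative).
fn bitreverse_permutation<T>(array: &mut [T]) {
    let n = array.len();
    if n <= 1 {
        return;
    }
    let bits = n.trailing_zeros(); // n is assumed to be a power of two
    for i in 0..n {
        // reverse the low `bits` bits of the index
        let j = i.reverse_bits() >> (usize::BITS - bits);
        if i < j {
            array.swap(i, j); // swap each pair exactly once
        }
    }
}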
@ -118,19 +120,14 @@ where
///
/// Arguments:
///
/// - a : &mut [Self]
/// (a reference to) a mutable array of field elements which is to
/// be transformed under the FFT. The transformation happens in-
/// place.
/// - a : &mut [Self] (a reference to) a mutable array of field elements which is to be
/// transformed under the FFT. The transformation happens in- place.
///
/// - psi_rev: &[Self]
/// (a reference to) an array of powers of psi, from 0 to n-1,
/// but ordered by bit-reversed index. Here psi is a primitive root
/// of order 2n. You can use
/// `Self::bitreversed_powers(psi, n)` for this purpose, but this
/// trait implementation is not const. For the performance benefit
/// you want a precompiled array, which you can get if you can get
/// by implementing the same method and marking it "const".
/// - psi_rev: &[Self] (a reference to) an array of powers of psi, from 0 to n-1, but ordered
/// by bit-reversed index. Here psi is a primitive root of order 2n. You can use
/// `Self::bitreversed_powers(psi, n)` for this purpose, but this trait implementation is not
/// const. For the performance benefit you want a precompiled array, which you can get if you
/// can get by implementing the same method and marking it "const".
fn fft(a: &mut [Self], psi_rev: &[Self]) {
let n = a.len();
let mut t = n;
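The `psi_rev` precomputation that `fft` expects can be sketched the same way, here over toy modular arithmetic with Falcon's modulus q = 12289. Choosing `psi` as a primitive 2n-th root of unity for the given n is an assumption of this sketch and is out of scope here:

// Illustrative sketch of the psi_rev precomputation described above.
const MOD: u64 = 12289; // Falcon's modulus q

fn bitreversed_powers(psi: u64, n: usize) -> Vec<u64> {
    let mut powers = Vec::with_capacity(n);
    let mut acc = 1u64;
    for _ in 0..n {
        powers.push(acc);
        acc = acc * psi % MOD;
    }
    bitreverse_permutation(&mut powers); // the permutation sketched earlier
    powers
}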
@ -158,20 +155,15 @@ where
///
/// Arguments:
///
/// - a : &mut [Self]
/// (a reference to) a mutable array of field elements which is to
/// be transformed under the IFFT. The transformation happens in-
/// place.
/// - a : &mut [Self] (a reference to) a mutable array of field elements which is to be
/// transformed under the IFFT. The transformation happens in- place.
///
/// - psi_inv_rev: &[Self]
/// (a reference to) an array of powers of psi^-1, from 0 to n-1,
/// but ordered by bit-reversed index. Here psi is a primitive root of
/// order 2n. You can use
/// `Self::bitreversed_powers(Self::inverse_or_zero(psi), n)` for
/// this purpose, but this trait implementation is not const. For
/// the performance benefit you want a precompiled array, which you
/// can get if you can get by implementing the same methods and marking
/// them "const".
/// - psi_inv_rev: &[Self] (a reference to) an array of powers of psi^-1, from 0 to n-1, but
/// ordered by bit-reversed index. Here psi is a primitive root of order 2n. You can use
/// `Self::bitreversed_powers(Self::inverse_or_zero(psi), n)` for this purpose, but this
/// trait implementation is not const. For the performance benefit you want a precompiled
/// array, which you can get if you can get by implementing the same methods and marking them
/// "const".
fn ifft(a: &mut [Self], psi_inv_rev: &[Self], ninv: Self) {
let n = a.len();
let mut t = 1;
@ -1,8 +1,10 @@
use super::{fft::CyclotomicFourier, Inverse, MODULUS};
use alloc::string::String;
use core::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign};

use num::{One, Zero};

use super::{fft::CyclotomicFourier, Inverse, MODULUS};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct FalconFelt(u32);
@ -2,17 +2,19 @@
//!
//! It uses and acknowledges the work in:
//!
//! 1. The [reference](https://falcon-sign.info/impl/README.txt.html) implementation by Thomas Pornin.
//! 1. The [reference](https://falcon-sign.info/impl/README.txt.html) implementation by Thomas
//!    Pornin.
//! 2. The [Rust](https://github.com/aszepieniec/falcon-rust) implementation by Alan Szepieniec.
use super::MODULUS;
use alloc::{string::String, vec::Vec};
use core::ops::MulAssign;

#[cfg(not(feature = "std"))]
use num::Float;
use num::{BigInt, FromPrimitive, One, Zero};
use num_complex::Complex64;
use rand::Rng;

#[cfg(not(feature = "std"))]
use num::Float;
use super::MODULUS;

mod fft;
pub use fft::{CyclotomicFourier, FastFft};

@ -152,7 +154,7 @@ fn ntru_solve(
{
None
}
}
},
}
}
@ -1,12 +1,18 @@
use super::{field::FalconFelt, Inverse};
use crate::dsa::rpo_falcon512::{MODULUS, N};
use crate::Felt;
use alloc::vec::Vec;
use core::default::Default;
use core::fmt::Debug;
use core::ops::{Add, AddAssign, Div, Mul, MulAssign, Neg, Sub, SubAssign};
use core::{
default::Default,
fmt::Debug,
ops::{Add, AddAssign, Div, Mul, MulAssign, Neg, Sub, SubAssign},
};

use num::{One, Zero};

use super::{field::FalconFelt, Inverse};
use crate::{
dsa::rpo_falcon512::{MODULUS, N},
Felt,
};

#[derive(Debug, Clone, Default)]
pub struct Polynomial<F> {
pub coefficients: Vec<F>,

@ -134,8 +140,8 @@ impl<
Self::new(coefficients)
}

/// Computes the galois adjoint of the polynomial in the cyclotomic ring F\[ X \] / < X^n + 1 > ,
/// which corresponds to f(x^2).
/// Computes the galois adjoint of the polynomial in the cyclotomic ring F\[ X \] / < X^n + 1 >
/// , which corresponds to f(x^2).
pub fn galois_adjoint(&self) -> Self {
Self::new(
self.coefficients
@ -1,8 +1,8 @@
use core::f64::consts::LN_2;
use rand::Rng;

#[cfg(not(feature = "std"))]
use num::Float;
use rand::Rng;

/// Samples an integer from {0, ..., 18} according to the distribution χ, which is close to
/// the half-Gaussian distribution on the natural numbers with mean 0 and standard deviation

@ -40,18 +40,18 @@ fn approx_exp(x: f64, ccs: f64) -> u64 {
// https://eprint.iacr.org/2018/1234
// https://github.com/raykzhao/gaussian
const C: [u64; 13] = [
0x00000004741183A3u64,
0x00000036548CFC06u64,
0x0000024FDCBF140Au64,
0x0000171D939DE045u64,
0x0000D00CF58F6F84u64,
0x000680681CF796E3u64,
0x002D82D8305B0FEAu64,
0x011111110E066FD0u64,
0x0555555555070F00u64,
0x155555555581FF00u64,
0x400000000002B400u64,
0x7FFFFFFFFFFF4800u64,
0x00000004741183a3u64,
0x00000036548cfc06u64,
0x0000024fdcbf140au64,
0x0000171d939de045u64,
0x0000d00cf58f6f84u64,
0x000680681cf796e3u64,
0x002d82d8305b0feau64,
0x011111110e066fd0u64,
0x0555555555070f00u64,
0x155555555581ff00u64,
0x400000000002b400u64,
0x7fffffffffff4800u64,
0x8000000000000000u64,
];
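For context on how a coefficient table like `C` is typically consumed: the FACCT-style approximation linked above evaluates the polynomial by Horner's rule in 2^63 fixed-point arithmetic, roughly as sketched below. The exact scaling used by this crate's `approx_exp` may differ; this shows the general shape of the technique, not a drop-in reimplementation:

// Horner evaluation in 2^63 fixed point (illustrative; scaling may differ
// from this crate's approx_exp).
fn horner_fixed_point(c: &[u64; 13], z: u64) -> u64 {
    // y <- c[i] - (z * y) >> 63 at each step, via a widening multiply
    let mut y = c[0];
    for &ci in &c[1..] {
        let prod = ((z as u128 * y as u128) >> 63) as u64;
        y = ci.wrapping_sub(prod);
    }
    y
}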
@ -116,9 +116,10 @@ pub(crate) fn sampler_z<R: Rng>(mu: f64, sigma: f64, sigma_min: f64, rng: &mut R
#[cfg(all(test, feature = "std"))]
mod test {
use alloc::vec::Vec;
use rand::RngCore;
use std::{thread::sleep, time::Duration};

use rand::RngCore;

use super::{approx_exp, ber_exp, sampler_z};

/// RNG used only for testing purposes, whereby the produced
@ -9,9 +9,11 @@ mod keys;
mod math;
mod signature;

pub use self::keys::{PubKeyPoly, PublicKey, SecretKey};
pub use self::math::Polynomial;
pub use self::signature::{Signature, SignatureHeader, SignaturePoly};
pub use self::{
keys::{PubKeyPoly, PublicKey, SecretKey},
math::Polynomial,
signature::{Signature, SignatureHeader, SignaturePoly},
};

// CONSTANTS
// ================================================================================================
@ -1,6 +1,8 @@
use alloc::{string::ToString, vec::Vec};
use core::ops::Deref;

use num::Zero;

use super::{
hash_to_point::hash_to_point_rpo256,
keys::PubKeyPoly,

@ -8,7 +10,6 @@ use super::{
ByteReader, ByteWriter, Deserializable, DeserializationError, Felt, Nonce, Rpo256,
Serializable, Word, LOG_N, MODULUS, N, SIG_L2_BOUND, SIG_POLY_BYTE_LEN,
};
use num::Zero;

// FALCON SIGNATURE
// ================================================================================================

@ -38,8 +39,8 @@ use num::Zero;
/// The signature is serialized as:
/// 1. A header byte specifying the algorithm used to encode the coefficients of the `s2` polynomial
/// together with the degree of the irreducible polynomial phi. For RPO Falcon512, the header
/// byte is set to `10111001` which differentiates it from the standardized instantiation of
/// the Falcon signature.
/// byte is set to `10111001` which differentiates it from the standardized instantiation of the
/// Falcon signature.
/// 2. 40 bytes for the nonce.
/// 3. 625 bytes encoding the `s2` polynomial above.
///
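Summing the layout above as a quick sketch (constant names here are hypothetical, except that 625 matches the `SIG_POLY_BYTE_LEN` imported earlier in this file):

// Layout arithmetic for the serialization described above (illustrative).
const HEADER_BYTE_LEN: usize = 1; // the single header byte
const NONCE_BYTE_LEN: usize = 40; // the nonce
const POLY_BYTE_LEN: usize = 625; // = SIG_POLY_BYTE_LEN in this module
const TOTAL_SIG_BYTE_LEN: usize = HEADER_BYTE_LEN + NONCE_BYTE_LEN + POLY_BYTE_LEN; // = 666 bytes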
@ -355,10 +356,11 @@ fn are_coefficients_valid(x: &[i16]) -> bool {

#[cfg(test)]
mod tests {
use super::{super::SecretKey, *};
use rand::SeedableRng;
use rand_chacha::ChaCha20Rng;

use super::{super::SecretKey, *};

#[test]
fn test_serialization_round_trip() {
let seed = [0_u8; 32];

@ -1,8 +1,9 @@
use alloc::vec::Vec;

use proptest::prelude::*;
use rand_utils::rand_vector;

use super::*;
use alloc::vec::Vec;

#[test]
fn blake3_hash_elements() {
@ -1,6 +1,6 @@
//! Cryptographic hash functions used by the Miden VM and the Miden rollup.

use super::{CubeExtension, Felt, FieldElement, StarkField, ONE, ZERO};
use super::{CubeExtension, Felt, FieldElement, StarkField, ZERO};

pub mod blake;
@ -4,40 +4,43 @@ use core::arch::x86_64::*;
// https://github.com/0xPolygonZero/plonky2/blob/main/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs

// Preliminary notes:
// 1. AVX does not support addition with carry but 128-bit (2-word) addition can be easily
// emulated. The method recognizes that for a + b overflowed iff (a + b) < a:
// i. res_lo = a_lo + b_lo
// ii. carry_mask = res_lo < a_lo
// iii. res_hi = a_hi + b_hi - carry_mask
// 1. AVX does not support addition with carry but 128-bit (2-word) addition can be easily emulated.
// The method recognizes that for a + b overflowed iff (a + b) < a:
// 1. res_lo = a_lo + b_lo
// 2. carry_mask = res_lo < a_lo
// 3. res_hi = a_hi + b_hi - carry_mask
//
// Notice that carry_mask is subtracted, not added. This is because AVX comparison instructions
// return -1 (all bits 1) for true and 0 for false.
//
// 2. AVX does not have unsigned 64-bit comparisons. Those can be emulated with signed comparisons
// by recognizing that a <u b iff a + (1 << 63) <s b + (1 << 63), where the addition wraps around
// and the comparisons are unsigned and signed respectively. The shift function adds/subtracts
// 1 << 63 to enable this trick.
// Example: addition with carry.
// i. a_lo_s = shift(a_lo)
// ii. res_lo_s = a_lo_s + b_lo
// iii. carry_mask = res_lo_s <s a_lo_s
// iv. res_lo = shift(res_lo_s)
// v. res_hi = a_hi + b_hi - carry_mask
// The suffix _s denotes a value that has been shifted by 1 << 63. The result of addition is
// shifted if exactly one of the operands is shifted, as is the case on line ii. Line iii.
// performs a signed comparison res_lo_s <s a_lo_s on shifted values to emulate unsigned
// comparison res_lo <u a_lo on unshifted values. Finally, line iv. reverses the shift so the
// result can be returned.
// When performing a chain of calculations, we can often save instructions by letting the shift
// propagate through and only undoing it when necessary. For example, to compute the addition of
// three two-word (128-bit) numbers we can do:
// i. a_lo_s = shift(a_lo)
// ii. tmp_lo_s = a_lo_s + b_lo
// iii. tmp_carry_mask = tmp_lo_s <s a_lo_s
// iv. tmp_hi = a_hi + b_hi - tmp_carry_mask
// v. res_lo_s = tmp_lo_s + c_lo
// vi. res_carry_mask = res_lo_s <s tmp_lo_s
// vii. res_lo = shift(res_lo_s)
// viii. res_hi = tmp_hi + c_hi - res_carry_mask
// and the comparisons are unsigned and signed respectively. The shift function adds/subtracts 1
// << 63 to enable this trick. Addition with carry example:
// 1. a_lo_s = shift(a_lo)
// 2. res_lo_s = a_lo_s + b_lo
// 3. carry_mask = res_lo_s <s a_lo_s
// 4. res_lo = shift(res_lo_s)
// 5. res_hi = a_hi + b_hi - carry_mask
//
// The suffix _s denotes a value that has been shifted by 1 << 63. The result of addition
// is shifted if exactly one of the operands is shifted, as is the case on
// line 2. Line 3. performs a signed comparison res_lo_s <s a_lo_s on shifted values to
// emulate unsigned comparison res_lo <u a_lo on unshifted values. Finally, line 4. reverses the
// shift so the result can be returned.
//
// When performing a chain of calculations, we can often save instructions by letting
// the shift propagate through and only undoing it when necessary.
// For example, to compute the addition of three two-word (128-bit) numbers we can do:
// 1. a_lo_s = shift(a_lo)
// 2. tmp_lo_s = a_lo_s + b_lo
// 3. tmp_carry_mask = tmp_lo_s <s a_lo_s
// 4. tmp_hi = a_hi + b_hi - tmp_carry_mask
// 5. res_lo_s = tmp_lo_s + c_lo
// 6. res_carry_mask = res_lo_s <s tmp_lo_s
// 7. res_lo = shift(res_lo_s)
// 8. res_hi = tmp_hi + c_hi - res_carry_mask
//
// Notice that the above 3-value addition still only requires two calls to shift, just like our
// 2-value addition.
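A plain-Rust, scalar rendering of the two tricks above may help (illustrative only; the real code uses AVX2 lanes). `shift` is the same xor with 1 << 63, and the carry is added as +1 here rather than subtracted as a -1 mask, because scalar comparisons return a bool instead of an all-ones lane:

// Scalar demonstration of the notes above (illustrative only).
fn shift(x: u64) -> i64 {
    (x ^ (1 << 63)) as i64 // adding/subtracting 1 << 63 == flipping the top bit
}

/// 128-bit addition from 64-bit halves using the carry-mask idea.
fn add128(a: (u64, u64), b: (u64, u64)) -> (u64, u64) {
    let (a_hi, a_lo) = a;
    let (b_hi, b_lo) = b;
    let res_lo = a_lo.wrapping_add(b_lo);
    // unsigned res_lo < a_lo, emulated with a signed comparison on shifted values
    let carry = (shift(res_lo) < shift(a_lo)) as u64;
    (a_hi.wrapping_add(b_hi).wrapping_add(carry), res_lo)
}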
@ -60,10 +63,10 @@ pub fn branch_hint() {
}

macro_rules! map3 {
($f:ident::<$l:literal>, $v:ident) => {
($f:ident:: < $l:literal > , $v:ident) => {
($f::<$l>($v.0), $f::<$l>($v.1), $f::<$l>($v.2))
};
($f:ident::<$l:literal>, $v1:ident, $v2:ident) => {
($f:ident:: < $l:literal > , $v1:ident, $v2:ident) => {
($f::<$l>($v1.0, $v2.0), $f::<$l>($v1.1, $v2.1), $f::<$l>($v1.2, $v2.2))
};
($f:ident, $v:ident) => {

@ -72,11 +75,11 @@ macro_rules! map3 {
($f:ident, $v0:ident, $v1:ident) => {
($f($v0.0, $v1.0), $f($v0.1, $v1.1), $f($v0.2, $v1.2))
};
($f:ident, rep $v0:ident, $v1:ident) => {
($f:ident,rep $v0:ident, $v1:ident) => {
($f($v0, $v1.0), $f($v0, $v1.1), $f($v0, $v1.2))
};

($f:ident, $v0:ident, rep $v1:ident) => {
($f:ident, $v0:ident,rep $v1:ident) => {
($f($v0.0, $v1), $f($v0.1, $v1), $f($v0.2, $v1))
};
}
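For readers unfamiliar with the macro: `map3!` just applies a function lane-wise across a 3-tuple. Its expansion is equivalent to the hand-written tuple below (toy function and values, no intrinsics):

// What map3!(square, v) expands to, modulo the macro plumbing (illustrative).
fn square(x: u64) -> u64 {
    x * x
}

fn demo() -> (u64, u64, u64) {
    let v = (2u64, 3u64, 4u64);
    (square(v.0), square(v.1), square(v.2))
}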
@ -1,26 +1,28 @@
// FFT-BASED MDS MULTIPLICATION HELPER FUNCTIONS
// ================================================================================================

/// This module contains helper functions as well as constants used to perform the vector-matrix
/// multiplication step of the Rescue prime permutation. The special form of our MDS matrix
/// i.e. being circular, allows us to reduce the vector-matrix multiplication to a Hadamard product
/// of two vectors in "frequency domain". This follows from the simple fact that every circulant
/// matrix has the columns of the discrete Fourier transform matrix as orthogonal eigenvectors.
/// The implementation also avoids the use of 3-point FFTs, and 3-point iFFTs, and substitutes that
/// with explicit expressions. It also avoids, due to the form of our matrix in the frequency domain,
/// divisions by 2 and repeated modular reductions. This is because of our explicit choice of
/// an MDS matrix that has small powers of 2 entries in frequency domain.
/// The following implementation has benefited greatly from the discussions and insights of
/// Hamish Ivey-Law and Jacqueline Nabaglo of Polygon Zero and is base on Nabaglo's Plonky2
/// implementation.
//! This module contains helper functions as well as constants used to perform the vector-matrix
//! multiplication step of the Rescue prime permutation. The special form of our MDS matrix
//! i.e. being circular, allows us to reduce the vector-matrix multiplication to a Hadamard product
//! of two vectors in "frequency domain". This follows from the simple fact that every circulant
//! matrix has the columns of the discrete Fourier transform matrix as orthogonal eigenvectors.
//! The implementation also avoids the use of 3-point FFTs, and 3-point iFFTs, and substitutes that
//! with explicit expressions. It also avoids, due to the form of our matrix in the frequency
//! domain, divisions by 2 and repeated modular reductions. This is because of our explicit choice
//! of an MDS matrix that has small powers of 2 entries in frequency domain.
//! The following implementation has benefited greatly from the discussions and insights of
//! Hamish Ivey-Law and Jacqueline Nabaglo of Polygon Zero and is based on Nabaglo's Plonky2
//! implementation.

// Rescue MDS matrix in frequency domain.
//
// More precisely, this is the output of the three 4-point (real) FFTs of the first column of
// the MDS matrix i.e. just before the multiplication with the appropriate twiddle factors
// and application of the final four 3-point FFT in order to get the full 12-point FFT.
// The entries have been scaled appropriately in order to avoid divisions by 2 in iFFT2 and iFFT4.
// The code to generate the matrix in frequency domain is based on an adaptation of a code, to generate
// MDS matrices efficiently in original domain, that was developed by the Polygon Zero team.
// The code to generate the matrix in frequency domain is based on an adaptation of a code, to
// generate MDS matrices efficiently in original domain, that was developed by the Polygon Zero
// team.
const MDS_FREQ_BLOCK_ONE: [i64; 3] = [16, 8, 16];
const MDS_FREQ_BLOCK_TWO: [(i64, i64); 3] = [(-1, 2), (-1, 1), (4, 8)];
const MDS_FREQ_BLOCK_THREE: [i64; 3] = [-8, 1, 1];
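The circulant-matrix fact the module doc relies on can be pinned down with a naive reference implementation: row i of a circulant matrix is its first column rotated by i, so the product is exactly a cyclic convolution, which is the operation that diagonalizes in the frequency domain. This O(n^2) version (illustrative only) is the specification any FFT-based variant must match:

// Naive circulant-matrix multiplication, i.e. cyclic convolution (sketch).
fn circulant_mul(first_column: &[i64], v: &[i64]) -> Vec<i64> {
    let n = v.len();
    let mut out = vec![0i64; n];
    for i in 0..n {
        for j in 0..n {
            // C[i][j] = first_column[(i - j) mod n]
            out[i] += first_column[(i + n - j) % n] * v[j];
        }
    }
    out
}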
@ -1,8 +1,6 @@
use core::ops::Range;

use super::{
CubeExtension, Digest, ElementHasher, Felt, FieldElement, Hasher, StarkField, ONE, ZERO,
};
use super::{CubeExtension, Digest, ElementHasher, Felt, FieldElement, Hasher, StarkField, ZERO};

mod arch;
pub use arch::optimized::{add_constants_and_apply_inv_sbox, add_constants_and_apply_sbox};

@ -466,6 +466,7 @@ impl IntoIterator for RpoDigest {
#[cfg(test)]
mod tests {
use alloc::string::String;

use rand_utils::rand_value;

use super::{Deserializable, Felt, RpoDigest, Serializable, DIGEST_BYTES, DIGEST_SIZE};
@ -4,7 +4,7 @@ use super::{
add_constants, add_constants_and_apply_inv_sbox, add_constants_and_apply_sbox, apply_inv_sbox,
apply_mds, apply_sbox, Digest, ElementHasher, Felt, FieldElement, Hasher, StarkField, ARK1,
ARK2, BINARY_CHUNK_SIZE, CAPACITY_RANGE, DIGEST_BYTES, DIGEST_RANGE, DIGEST_SIZE, INPUT1_RANGE,
INPUT2_RANGE, MDS, NUM_ROUNDS, ONE, RATE_RANGE, RATE_WIDTH, STATE_WIDTH, ZERO,
INPUT2_RANGE, MDS, NUM_ROUNDS, RATE_RANGE, RATE_WIDTH, STATE_WIDTH, ZERO,
};

mod digest;

@ -19,7 +19,8 @@ mod tests;
/// Implementation of the Rescue Prime Optimized hash function with 256-bit output.
///
/// The hash function is implemented according to the Rescue Prime Optimized
/// [specifications](https://eprint.iacr.org/2022/1577)
/// [specifications](https://eprint.iacr.org/2022/1577) while the padding rule follows the one
/// described [here](https://eprint.iacr.org/2023/1045).
///
/// The parameters used to instantiate the function are:
/// * Field: 64-bit prime field with modulus p = 2^64 - 2^32 + 1.

@ -51,7 +52,7 @@ mod tests;
///
/// Thus, if the underlying data consists of valid field elements, it might make more sense
/// to deserialize them into field elements and then hash them using
/// [hash_elements()](Rpo256::hash_elements) function rather then hashing the serialized bytes
/// [hash_elements()](Rpo256::hash_elements) function rather than hashing the serialized bytes
/// using [hash()](Rpo256::hash) function.
///
/// ## Domain separation
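The advice above, as a usage sketch (names as in this module; data values are illustrative, and `as_int` comes from `StarkField`):

// Usage sketch of the hash_elements-vs-hash advice above (illustrative).
fn prefer_hash_elements() {
    let felts = [Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)];
    let d1 = Rpo256::hash_elements(&felts); // preferred when data is field elements

    // hashing the 8-byte encodings instead goes through the byte-padding path
    let bytes: Vec<u8> = felts.iter().flat_map(|f| f.as_int().to_le_bytes()).collect();
    let d2 = Rpo256::hash(&bytes);
    assert_ne!(d1, d2); // the two paths are domain-separated via the capacity element
}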
@ -64,6 +65,10 @@ mod tests;
/// becomes the bottleneck for the security bound of the sponge in overwrite-mode only when it is
/// lower than 2^128, we see that the target 128-bit security level is maintained as long as
/// the size of the domain identifier space, including for padding, is less than 2^128.
///
/// ## Hashing of empty input
/// The current implementation hashes empty input to the zero digest [0, 0, 0, 0]. This has
/// the benefit of requiring no calls to the RPO permutation when hashing empty input.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Rpo256();

@ -77,14 +82,16 @@ impl Hasher for Rpo256 {
// initialize the state with zeroes
let mut state = [ZERO; STATE_WIDTH];

// set the capacity (first element) to a flag on whether or not the input length is evenly
// divided by the rate. this will prevent collisions between padded and non-padded inputs,
// and will rule out the need to perform an extra permutation in case of evenly divided
// inputs.
let is_rate_multiple = bytes.len() % RATE_WIDTH == 0;
if !is_rate_multiple {
state[CAPACITY_RANGE.start] = ONE;
}
// determine the number of field elements needed to encode `bytes` when each field element
// represents at most 7 bytes.
let num_field_elem = bytes.len().div_ceil(BINARY_CHUNK_SIZE);

// set the first capacity element to `RATE_WIDTH + (num_field_elem % RATE_WIDTH)`. We do
// this to achieve:
// 1. Domain separating hashing of `[u8]` from hashing of `[Felt]`.
// 2. Avoiding collisions at the `[Felt]` representation of the encoded bytes.
state[CAPACITY_RANGE.start] =
Felt::from((RATE_WIDTH + (num_field_elem % RATE_WIDTH)) as u8);

// initialize a buffer to receive the little-endian elements.
let mut buf = [0_u8; 8];
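The byte-to-element encoding this sets up can be viewed in isolation (a sketch mirroring the logic in the next hunk, not the method itself): each 7-byte chunk becomes one little-endian u64, and the final chunk, full or not, gets a 1-byte appended right after its data:

// Standalone sketch of the byte encoding used below (illustrative).
fn encode_bytes(bytes: &[u8]) -> Vec<u64> {
    const BINARY_CHUNK_SIZE: usize = 7; // at most 7 bytes per field element
    let num_field_elem = bytes.len().div_ceil(BINARY_CHUNK_SIZE);
    bytes
        .chunks(BINARY_CHUNK_SIZE)
        .enumerate()
        .map(|(idx, chunk)| {
            let mut buf = [0u8; 8];
            buf[..chunk.len()].copy_from_slice(chunk);
            if idx == num_field_elem - 1 {
                buf[chunk.len()] = 1; // padding byte on the last chunk, even a full one
            }
            u64::from_le_bytes(buf)
        })
        .collect()
}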
@ -93,41 +100,49 @@ impl Hasher for Rpo256 {
// into the state.
//
// every time the rate range is filled, a permutation is performed. if the final value of
// `i` is not zero, then the chunks count wasn't enough to fill the state range, and an
// additional permutation must be performed.
let i = bytes.chunks(BINARY_CHUNK_SIZE).fold(0, |i, chunk| {
// the last element of the iteration may or may not be a full chunk. if it's not, then
// we need to pad the remainder bytes of the chunk with zeroes, separated by a `1`.
// this will avoid collisions.
if chunk.len() == BINARY_CHUNK_SIZE {
// `rate_pos` is not zero, then the chunks count wasn't enough to fill the state range,
// and an additional permutation must be performed.
let mut current_chunk_idx = 0_usize;
// handle the case of an empty `bytes`
let last_chunk_idx = if num_field_elem == 0 {
current_chunk_idx
} else {
num_field_elem - 1
};
let rate_pos = bytes.chunks(BINARY_CHUNK_SIZE).fold(0, |rate_pos, chunk| {
// copy the chunk into the buffer
if current_chunk_idx != last_chunk_idx {
buf[..BINARY_CHUNK_SIZE].copy_from_slice(chunk);
} else {
// on the last iteration, we pad `buf` with a 1 followed by as many 0's as are
// needed to fill it
buf.fill(0);
buf[..chunk.len()].copy_from_slice(chunk);
buf[chunk.len()] = 1;
}
current_chunk_idx += 1;

// set the current rate element to the input. since we take at most 7 bytes, we are
// guaranteed that the inputs data will fit into a single field element.
state[RATE_RANGE.start + i] = Felt::new(u64::from_le_bytes(buf));
state[RATE_RANGE.start + rate_pos] = Felt::new(u64::from_le_bytes(buf));

// proceed filling the range. if it's full, then we apply a permutation and reset the
// counter to the beginning of the range.
if i == RATE_WIDTH - 1 {
if rate_pos == RATE_WIDTH - 1 {
Self::apply_permutation(&mut state);
0
} else {
i + 1
rate_pos + 1
}
});

// if we absorbed some elements but didn't apply a permutation to them (would happen when
// the number of elements is not a multiple of RATE_WIDTH), apply the RPO permutation. we
// don't need to apply any extra padding because the first capacity element contains a
// flag indicating whether the input is evenly divisible by the rate.
if i != 0 {
state[RATE_RANGE.start + i..RATE_RANGE.end].fill(ZERO);
state[RATE_RANGE.start + i] = ONE;
// flag indicating the number of field elements constituting the last block when the latter
// is not divisible by `RATE_WIDTH`.
if rate_pos != 0 {
state[RATE_RANGE.start + rate_pos..RATE_RANGE.end].fill(ZERO);
Self::apply_permutation(&mut state);
}
@ -152,26 +167,21 @@ impl Hasher for Rpo256 {
fn merge_with_int(seed: Self::Digest, value: u64) -> Self::Digest {
// initialize the state as follows:
// - seed is copied into the first 4 elements of the rate portion of the state.
// - if the value fits into a single field element, copy it into the fifth rate element
//   and set the sixth rate element to 1.
// - if the value doesn't fit into a single field element, split it into two field
//   elements, copy them into rate elements 5 and 6, and set the seventh rate element
//   to 1.
// - set the first capacity element to 1
// - if the value fits into a single field element, copy it into the fifth rate element and
//   set the first capacity element to 5.
// - if the value doesn't fit into a single field element, split it into two field elements,
//   copy them into rate elements 5 and 6 and set the first capacity element to 6.
let mut state = [ZERO; STATE_WIDTH];
state[INPUT1_RANGE].copy_from_slice(seed.as_elements());
state[INPUT2_RANGE.start] = Felt::new(value);
if value < Felt::MODULUS {
state[INPUT2_RANGE.start + 1] = ONE;
state[CAPACITY_RANGE.start] = Felt::from(5_u8);
} else {
state[INPUT2_RANGE.start + 1] = Felt::new(value / Felt::MODULUS);
state[INPUT2_RANGE.start + 2] = ONE;
state[CAPACITY_RANGE.start] = Felt::from(6_u8);
}

// common padding for both cases
state[CAPACITY_RANGE.start] = ONE;

// apply the RPO permutation and return the first four elements of the state
// apply the RPO permutation and return the first four elements of the rate
Self::apply_permutation(&mut state);
RpoDigest::new(state[DIGEST_RANGE].try_into().unwrap())
}
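A usage sketch of the new capacity flag (hypothetical values; `parent` stands in for an existing digest). The flag, 5 or 6, records how many rate elements carry input, so the one-limb and two-limb encodings cannot collide:

// Illustrative usage of merge_with_int (placeholder `parent` digest).
fn demo_merge_with_int(parent: RpoDigest) {
    let small = Rpo256::merge_with_int(parent, 42u64); // one limb, capacity flag 5
    let large = Rpo256::merge_with_int(parent, u64::MAX); // two limbs, capacity flag 6
    assert_ne!(small, large);
}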
@ -185,11 +195,9 @@ impl ElementHasher for Rpo256 {
let elements = E::slice_as_base_elements(elements);

// initialize state to all zeros, except for the first element of the capacity part, which
// is set to 1 if the number of elements is not a multiple of RATE_WIDTH.
// is set to `elements.len() % RATE_WIDTH`.
let mut state = [ZERO; STATE_WIDTH];
if elements.len() % RATE_WIDTH != 0 {
state[CAPACITY_RANGE.start] = ONE;
}
state[CAPACITY_RANGE.start] = Self::BaseField::from((elements.len() % RATE_WIDTH) as u8);

// absorb elements into the state one by one until the rate portion of the state is filled
// up; then apply the Rescue permutation and start absorbing again; repeat until all

@ -206,11 +214,8 @@ impl ElementHasher for Rpo256 {

// if we absorbed some elements but didn't apply a permutation to them (would happen when
// the number of elements is not a multiple of RATE_WIDTH), apply the RPO permutation after
// padding by appending a 1 followed by as many 0 as necessary to make the input length a
// multiple of the RATE_WIDTH.
// padding by as many 0 as necessary to make the input length a multiple of the RATE_WIDTH.
if i > 0 {
state[RATE_RANGE.start + i] = ONE;
i += 1;
while i != RATE_WIDTH {
state[RATE_RANGE.start + i] = ZERO;
i += 1;
@ -1,12 +1,16 @@
use alloc::{collections::BTreeSet, vec::Vec};

use proptest::prelude::*;
use rand_utils::rand_value;

use super::{
super::{apply_inv_sbox, apply_sbox, ALPHA, INV_ALPHA},
Felt, FieldElement, Hasher, Rpo256, RpoDigest, StarkField, ONE, STATE_WIDTH, ZERO,
Felt, FieldElement, Hasher, Rpo256, RpoDigest, StarkField, STATE_WIDTH, ZERO,
};
use crate::{
hash::rescue::{BINARY_CHUNK_SIZE, CAPACITY_RANGE, RATE_WIDTH},
Word, ONE,
};
use crate::Word;
use alloc::{collections::BTreeSet, vec::Vec};

#[test]
fn test_sbox() {

@ -58,7 +62,7 @@ fn merge_vs_merge_in_domain() {
];
let merge_result = Rpo256::merge(&digests);

// ------------- merge with domain = 0 ----------------------------------------------------------
// ------------- merge with domain = 0 -------------

// set domain to ZERO. This should not change the result.
let domain = ZERO;

@ -66,7 +70,7 @@ fn merge_vs_merge_in_domain() {
let merge_in_domain_result = Rpo256::merge_in_domain(&digests, domain);
assert_eq!(merge_result, merge_in_domain_result);

// ------------- merge with domain = 1 ----------------------------------------------------------
// ------------- merge with domain = 1 -------------

// set domain to ONE. This should change the result.
let domain = ONE;

@ -125,6 +129,27 @@ fn hash_padding() {
assert_ne!(r1, r2);
}

#[test]
fn hash_padding_no_extra_permutation_call() {
use crate::hash::rescue::DIGEST_RANGE;

// Implementation
let num_bytes = BINARY_CHUNK_SIZE * RATE_WIDTH;
let mut buffer = vec![0_u8; num_bytes];
*buffer.last_mut().unwrap() = 97;
let r1 = Rpo256::hash(&buffer);

// Expected
let final_chunk = [0_u8, 0, 0, 0, 0, 0, 97, 1];
let mut state = [ZERO; STATE_WIDTH];
// padding when hashing bytes
state[CAPACITY_RANGE.start] = Felt::from(RATE_WIDTH as u8);
*state.last_mut().unwrap() = Felt::new(u64::from_le_bytes(final_chunk));
Rpo256::apply_permutation(&mut state);

assert_eq!(&r1[0..4], &state[DIGEST_RANGE]);
}

#[test]
fn hash_elements_padding() {
let e1 = [Felt::new(rand_value()); 2];

@ -158,6 +183,24 @@ fn hash_elements() {
assert_eq!(m_result, h_result);
}

#[test]
fn hash_empty() {
let elements: Vec<Felt> = vec![];

let zero_digest = RpoDigest::default();
let h_result = Rpo256::hash_elements(&elements);
assert_eq!(zero_digest, h_result);
}

#[test]
fn hash_empty_bytes() {
let bytes: Vec<u8> = vec![];

let zero_digest = RpoDigest::default();
let h_result = Rpo256::hash(&bytes);
assert_eq!(zero_digest, h_result);
}

#[test]
fn hash_test_vectors() {
let elements = [
@ -228,46 +271,46 @@ proptest! {

const EXPECTED: [Word; 19] = [
[
Felt::new(1502364727743950833),
Felt::new(5880949717274681448),
Felt::new(162790463902224431),
Felt::new(6901340476773664264),
Felt::new(18126731724905382595),
Felt::new(7388557040857728717),
Felt::new(14290750514634285295),
Felt::new(7852282086160480146),
],
[
Felt::new(7478710183745780580),
Felt::new(3308077307559720969),
Felt::new(3383561985796182409),
Felt::new(17205078494700259815),
Felt::new(10139303045932500183),
Felt::new(2293916558361785533),
Felt::new(15496361415980502047),
Felt::new(17904948502382283940),
],
[
Felt::new(17439912364295172999),
Felt::new(17979156346142712171),
Felt::new(8280795511427637894),
Felt::new(9349844417834368814),
Felt::new(17457546260239634015),
Felt::new(803990662839494686),
Felt::new(10386005777401424878),
Felt::new(18168807883298448638),
],
[
Felt::new(5105868198472766874),
Felt::new(13090564195691924742),
Felt::new(1058904296915798891),
Felt::new(18379501748825152268),
Felt::new(13072499238647455740),
Felt::new(10174350003422057273),
Felt::new(9201651627651151113),
Felt::new(6872461887313298746),
],
[
Felt::new(9133662113608941286),
Felt::new(12096627591905525991),
Felt::new(14963426595993304047),
Felt::new(13290205840019973377),
Felt::new(2903803350580990546),
Felt::new(1838870750730563299),
Felt::new(4258619137315479708),
Felt::new(17334260395129062936),
],
[
Felt::new(3134262397541159485),
Felt::new(10106105871979362399),
Felt::new(138768814855329459),
Felt::new(15044809212457404677),
Felt::new(8571221005243425262),
Felt::new(3016595589318175865),
Felt::new(13933674291329928438),
Felt::new(678640375034313072),
],
[
Felt::new(162696376578462826),
Felt::new(4991300494838863586),
Felt::new(660346084748120605),
Felt::new(13179389528641752698),
Felt::new(16314113978986502310),
Felt::new(14587622368743051587),
Felt::new(2808708361436818462),
Felt::new(10660517522478329440),
],
[
Felt::new(2242391899857912644),

@ -276,46 +319,46 @@ const EXPECTED: [Word; 19] = [
Felt::new(5046143039268215739),
],
[
Felt::new(9585630502158073976),
Felt::new(1310051013427303477),
Felt::new(7491921222636097758),
Felt::new(9417501558995216762),
Felt::new(5218076004221736204),
Felt::new(17169400568680971304),
Felt::new(8840075572473868990),
Felt::new(12382372614369863623),
],
[
Felt::new(1994394001720334744),
Felt::new(10866209900885216467),
Felt::new(13836092831163031683),
Felt::new(10814636682252756697),
Felt::new(9783834557155203486),
Felt::new(12317263104955018849),
Felt::new(3933748931816109604),
Felt::new(1843043029836917214),
],
[
Felt::new(17486854790732826405),
Felt::new(17376549265955727562),
Felt::new(2371059831956435003),
Felt::new(17585704935858006533),
Felt::new(14498234468286984551),
Felt::new(16837257669834682387),
Felt::new(6664141123711355107),
Felt::new(4590460158294697186),
],
[
Felt::new(11368277489137713825),
Felt::new(3906270146963049287),
Felt::new(10236262408213059745),
Felt::new(78552867005814007),
Felt::new(4661800562479916067),
Felt::new(11794407552792839953),
Felt::new(9037742258721863712),
Felt::new(6287820818064278819),
],
[
Felt::new(17899847381280262181),
Felt::new(14717912805498651446),
Felt::new(10769146203951775298),
Felt::new(2774289833490417856),
Felt::new(7752693085194633729),
Felt::new(7379857372245835536),
Felt::new(9270229380648024178),
Felt::new(10638301488452560378),
],
[
Felt::new(3794717687462954368),
Felt::new(4386865643074822822),
Felt::new(8854162840275334305),
Felt::new(7129983987107225269),
Felt::new(11542686762698783357),
Felt::new(15570714990728449027),
Felt::new(7518801014067819501),
Felt::new(12706437751337583515),
],
[
Felt::new(7244773535611633983),
Felt::new(19359923075859320),
Felt::new(10898655967774994333),
Felt::new(9319339563065736480),
Felt::new(9553923701032839042),
Felt::new(7281190920209838818),
Felt::new(2488477917448393955),
Felt::new(5088955350303368837),
],
[
Felt::new(4935426252518736883),

@ -324,21 +367,21 @@ const EXPECTED: [Word; 19] = [
Felt::new(18159875708229758073),
],
[
Felt::new(14871230873837295931),
Felt::new(11225255908868362971),
Felt::new(18100987641405432308),
Felt::new(1559244340089644233),
Felt::new(12795429638314178838),
Felt::new(14360248269767567855),
Felt::new(3819563852436765058),
Felt::new(10859123583999067291),
],
[
Felt::new(8348203744950016968),
Felt::new(4041411241960726733),
Felt::new(17584743399305468057),
Felt::new(16836952610803537051),
Felt::new(2695742617679420093),
Felt::new(9151515850666059759),
Felt::new(15855828029180595485),
Felt::new(17190029785471463210),
],
[
Felt::new(16139797453633030050),
Felt::new(1090233424040889412),
Felt::new(10770255347785669036),
Felt::new(16982398877290254028),
Felt::new(13205273108219124830),
Felt::new(2524898486192849221),
Felt::new(14618764355375283547),
Felt::new(10615614265042186874),
],
];
@ -466,6 +466,7 @@ impl IntoIterator for RpxDigest {
#[cfg(test)]
mod tests {
use alloc::string::String;

use rand_utils::rand_value;

use super::{Deserializable, Felt, RpxDigest, Serializable, DIGEST_BYTES, DIGEST_SIZE};

@ -11,6 +11,9 @@ use super::{
mod digest;
pub use digest::{RpxDigest, RpxDigestError};

#[cfg(test)]
mod tests;

pub type CubicExtElement = CubeExtension<Felt>;

// HASHER IMPLEMENTATION
@ -26,8 +29,10 @@ pub type CubicExtElement = CubeExtension<Felt>;
/// * Capacity size: 4 field elements.
/// * S-Box degree: 7.
/// * Rounds: There are 3 different types of rounds:
/// - (FB): `apply_mds` → `add_constants` → `apply_sbox` → `apply_mds` → `add_constants` → `apply_inv_sbox`.
/// - (E): `add_constants` → `ext_sbox` (which is raising to power 7 in the degree 3 extension field).
/// - (FB): `apply_mds` → `add_constants` → `apply_sbox` → `apply_mds` → `add_constants` →
///   `apply_inv_sbox`.
/// - (E): `add_constants` → `ext_sbox` (which is raising to power 7 in the degree 3 extension
///   field).
/// - (M): `apply_mds` → `add_constants`.
/// * Permutation: (FB) (E) (FB) (E) (FB) (E) (M).
///

@ -53,7 +58,7 @@ pub type CubicExtElement = CubeExtension<Felt>;
///
/// Thus, if the underlying data consists of valid field elements, it might make more sense
/// to deserialize them into field elements and then hash them using
/// [hash_elements()](Rpx256::hash_elements) function rather then hashing the serialized bytes
/// [hash_elements()](Rpx256::hash_elements) function rather than hashing the serialized bytes
/// using [hash()](Rpx256::hash) function.
///
/// ## Domain separation

@ -66,6 +71,10 @@ pub type CubicExtElement = CubeExtension<Felt>;
/// the bottleneck for the security bound of the sponge in overwrite-mode only when it is
/// lower than 2^128, we see that the target 128-bit security level is maintained as long as
/// the size of the domain identifier space, including for padding, is less than 2^128.
///
/// ## Hashing of empty input
/// The current implementation hashes empty input to the zero digest [0, 0, 0, 0]. This has
/// the benefit of requiring no calls to the RPX permutation when hashing empty input.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Rpx256();
@ -97,11 +106,18 @@ impl Hasher for Rpx256 {
// into the state.
//
// every time the rate range is filled, a permutation is performed. if the final value of
// `i` is not zero, then the chunks count wasn't enough to fill the state range, and an
// additional permutation must be performed.
let i = bytes.chunks(BINARY_CHUNK_SIZE).fold(0, |i, chunk| {
// `rate_pos` is not zero, then the chunks count wasn't enough to fill the state range,
// and an additional permutation must be performed.
let mut current_chunk_idx = 0_usize;
// handle the case of an empty `bytes`
let last_chunk_idx = if num_field_elem == 0 {
current_chunk_idx
} else {
num_field_elem - 1
};
let rate_pos = bytes.chunks(BINARY_CHUNK_SIZE).fold(0, |rate_pos, chunk| {
// copy the chunk into the buffer
if i != num_field_elem - 1 {
if current_chunk_idx != last_chunk_idx {
buf[..BINARY_CHUNK_SIZE].copy_from_slice(chunk);
} else {
// on the last iteration, we pad `buf` with a 1 followed by as many 0's as are

@ -110,18 +126,19 @@ impl Hasher for Rpx256 {
buf[..chunk.len()].copy_from_slice(chunk);
buf[chunk.len()] = 1;
}
current_chunk_idx += 1;

// set the current rate element to the input. since we take at most 7 bytes, we are
// guaranteed that the inputs data will fit into a single field element.
state[RATE_RANGE.start + i] = Felt::new(u64::from_le_bytes(buf));
state[RATE_RANGE.start + rate_pos] = Felt::new(u64::from_le_bytes(buf));

// proceed filling the range. if it's full, then we apply a permutation and reset the
// counter to the beginning of the range.
if i == RATE_WIDTH - 1 {
if rate_pos == RATE_WIDTH - 1 {
Self::apply_permutation(&mut state);
0
} else {
i + 1
rate_pos + 1
}
});

@ -130,8 +147,8 @@ impl Hasher for Rpx256 {
// don't need to apply any extra padding because the first capacity element contains a
// flag indicating the number of field elements constituting the last block when the latter
// is not divisible by `RATE_WIDTH`.
if i != 0 {
state[RATE_RANGE.start + i..RATE_RANGE.end].fill(ZERO);
if rate_pos != 0 {
state[RATE_RANGE.start + rate_pos..RATE_RANGE.end].fill(ZERO);
Self::apply_permutation(&mut state);
}

@ -158,8 +175,8 @@ impl Hasher for Rpx256 {
// - seed is copied into the first 4 elements of the rate portion of the state.
// - if the value fits into a single field element, copy it into the fifth rate element and
//   set the first capacity element to 5.
// - if the value doesn't fit into a single field element, split it into two field
//   elements, copy them into rate elements 5 and 6 and set the first capacity element to 6.
// - if the value doesn't fit into a single field element, split it into two field elements,
//   copy them into rate elements 5 and 6 and set the first capacity element to 6.
let mut state = [ZERO; STATE_WIDTH];
state[INPUT1_RANGE].copy_from_slice(seed.as_elements());
state[INPUT2_RANGE.start] = Felt::new(value);

@ -170,7 +187,7 @@ impl Hasher for Rpx256 {
state[CAPACITY_RANGE.start] = Felt::from(6_u8);
}

// apply the RPX permutation and return the first four elements of the state
// apply the RPX permutation and return the first four elements of the rate
Self::apply_permutation(&mut state);
RpxDigest::new(state[DIGEST_RANGE].try_into().unwrap())
}
186
src/hash/rescue/rpx/tests.rs
Normal file

@ -0,0 +1,186 @@
use alloc::{collections::BTreeSet, vec::Vec};

use proptest::prelude::*;
use rand_utils::rand_value;

use super::{Felt, Hasher, Rpx256, StarkField, ZERO};
use crate::{hash::rescue::RpxDigest, ONE};

#[test]
fn hash_elements_vs_merge() {
let elements = [Felt::new(rand_value()); 8];

let digests: [RpxDigest; 2] = [
RpxDigest::new(elements[..4].try_into().unwrap()),
RpxDigest::new(elements[4..].try_into().unwrap()),
];

let m_result = Rpx256::merge(&digests);
let h_result = Rpx256::hash_elements(&elements);
assert_eq!(m_result, h_result);
}

#[test]
fn merge_vs_merge_in_domain() {
let elements = [Felt::new(rand_value()); 8];

let digests: [RpxDigest; 2] = [
RpxDigest::new(elements[..4].try_into().unwrap()),
RpxDigest::new(elements[4..].try_into().unwrap()),
];
let merge_result = Rpx256::merge(&digests);

// ----- merge with domain = 0 ----------------------------------------------------------------

// set domain to ZERO. This should not change the result.
let domain = ZERO;

let merge_in_domain_result = Rpx256::merge_in_domain(&digests, domain);
assert_eq!(merge_result, merge_in_domain_result);

// ----- merge with domain = 1 ----------------------------------------------------------------

// set domain to ONE. This should change the result.
let domain = ONE;

let merge_in_domain_result = Rpx256::merge_in_domain(&digests, domain);
assert_ne!(merge_result, merge_in_domain_result);
}

#[test]
fn hash_elements_vs_merge_with_int() {
let tmp = [Felt::new(rand_value()); 4];
let seed = RpxDigest::new(tmp);

// ----- value fits into a field element ------------------------------------------------------
let val: Felt = Felt::new(rand_value());
let m_result = Rpx256::merge_with_int(seed, val.as_int());

let mut elements = seed.as_elements().to_vec();
elements.push(val);
let h_result = Rpx256::hash_elements(&elements);

assert_eq!(m_result, h_result);

// ----- value does not fit into a field element ----------------------------------------------
let val = Felt::MODULUS + 2;
let m_result = Rpx256::merge_with_int(seed, val);

let mut elements = seed.as_elements().to_vec();
elements.push(Felt::new(val));
elements.push(ONE);
let h_result = Rpx256::hash_elements(&elements);

assert_eq!(m_result, h_result);
}

#[test]
fn hash_padding() {
// adding a zero bytes at the end of a byte string should result in a different hash
let r1 = Rpx256::hash(&[1_u8, 2, 3]);
let r2 = Rpx256::hash(&[1_u8, 2, 3, 0]);
assert_ne!(r1, r2);

// same as above but with bigger inputs
let r1 = Rpx256::hash(&[1_u8, 2, 3, 4, 5, 6]);
let r2 = Rpx256::hash(&[1_u8, 2, 3, 4, 5, 6, 0]);
assert_ne!(r1, r2);

// same as above but with input splitting over two elements
let r1 = Rpx256::hash(&[1_u8, 2, 3, 4, 5, 6, 7]);
let r2 = Rpx256::hash(&[1_u8, 2, 3, 4, 5, 6, 7, 0]);
assert_ne!(r1, r2);

// same as above but with multiple zeros
let r1 = Rpx256::hash(&[1_u8, 2, 3, 4, 5, 6, 7, 0, 0]);
let r2 = Rpx256::hash(&[1_u8, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0]);
assert_ne!(r1, r2);
}

#[test]
fn hash_elements_padding() {
let e1 = [Felt::new(rand_value()); 2];
let e2 = [e1[0], e1[1], ZERO];

let r1 = Rpx256::hash_elements(&e1);
let r2 = Rpx256::hash_elements(&e2);
assert_ne!(r1, r2);
}

#[test]
fn hash_elements() {
let elements = [
ZERO,
ONE,
Felt::new(2),
Felt::new(3),
Felt::new(4),
Felt::new(5),
Felt::new(6),
Felt::new(7),
];

let digests: [RpxDigest; 2] = [
RpxDigest::new(elements[..4].try_into().unwrap()),
RpxDigest::new(elements[4..8].try_into().unwrap()),
];

let m_result = Rpx256::merge(&digests);
let h_result = Rpx256::hash_elements(&elements);
assert_eq!(m_result, h_result);
}

#[test]
fn hash_empty() {
let elements: Vec<Felt> = vec![];

let zero_digest = RpxDigest::default();
let h_result = Rpx256::hash_elements(&elements);
assert_eq!(zero_digest, h_result);
}

#[test]
fn hash_empty_bytes() {
let bytes: Vec<u8> = vec![];

let zero_digest = RpxDigest::default();
let h_result = Rpx256::hash(&bytes);
assert_eq!(zero_digest, h_result);
}

#[test]
fn sponge_bytes_with_remainder_length_wont_panic() {
// this test targets to assert that no panic will happen with the edge case of having an inputs
// with length that is not divisible by the used binary chunk size. 113 is a non-negligible
// input length that is prime; hence guaranteed to not be divisible by any choice of chunk
// size.
//
// this is a preliminary test to the fuzzy-stress of proptest.
Rpx256::hash(&[0; 113]);
}

#[test]
fn sponge_collision_for_wrapped_field_element() {
let a = Rpx256::hash(&[0; 8]);
let b = Rpx256::hash(&Felt::MODULUS.to_le_bytes());
assert_ne!(a, b);
}

#[test]
fn sponge_zeroes_collision() {
let mut zeroes = Vec::with_capacity(255);
let mut set = BTreeSet::new();
(0..255).for_each(|_| {
let hash = Rpx256::hash(&zeroes);
zeroes.push(0);
// panic if a collision was found
assert!(set.insert(hash));
});
}

proptest! {
#[test]
fn rpo256_wont_panic_with_arbitrary_input(ref bytes in any::<Vec<u8>>()) {
Rpx256::hash(bytes);
}
}
49
src/main.rs
@ -35,6 +35,7 @@ pub fn benchmark_smt() {

let mut tree = construction(entries, tree_size).unwrap();
insertion(&mut tree, tree_size).unwrap();
batched_insertion(&mut tree, tree_size).unwrap();
proof_generation(&mut tree, tree_size).unwrap();
}

@ -82,6 +83,54 @@ pub fn insertion(tree: &mut Smt, size: u64) -> Result<(), MerkleError> {
Ok(())
}

pub fn batched_insertion(tree: &mut Smt, size: u64) -> Result<(), MerkleError> {
println!("Running a batched insertion benchmark:");

let new_pairs: Vec<(RpoDigest, Word)> = (0..1000)
.map(|i| {
let key = Rpo256::hash(&rand_value::<u64>().to_be_bytes());
let value = [ONE, ONE, ONE, Felt::new(size + i)];
(key, value)
})
.collect();

let now = Instant::now();
let mutations = tree.compute_mutations(new_pairs);
let compute_elapsed = now.elapsed();

let now = Instant::now();
tree.apply_mutations(mutations).unwrap();
let apply_elapsed = now.elapsed();

println!(
"An average batch computation time measured by a 1k-batch into an SMT with {} key-value pairs over {:.3} milliseconds is {:.3} milliseconds",
size,
compute_elapsed.as_secs_f32() * 1000f32,
// Dividing by the number of iterations, 1000, and then multiplying by 1000 to get
// milliseconds, cancels out.
compute_elapsed.as_secs_f32(),
);

println!(
"An average batch application time measured by a 1k-batch into an SMT with {} key-value pairs over {:.3} milliseconds is {:.3} milliseconds",
size,
apply_elapsed.as_secs_f32() * 1000f32,
// Dividing by the number of iterations, 1000, and then multiplying by 1000 to get
// milliseconds, cancels out.
apply_elapsed.as_secs_f32(),
);

println!(
"An average batch insertion time measured by a 1k-batch into an SMT with {} key-value pairs totals to {:.3} milliseconds",
size,
(compute_elapsed + apply_elapsed).as_secs_f32() * 1000f32,
);

println!();

Ok(())
}

/// Runs the proof generation benchmark for the [`Smt`].
pub fn proof_generation(tree: &mut Smt, size: u64) -> Result<(), MerkleError> {
println!("Running a proof generation benchmark:");
@ -1,6 +1,6 @@
use core::slice;

use super::{Felt, RpoDigest, EMPTY_WORD};
use super::{smt::InnerNode, Felt, RpoDigest, EMPTY_WORD};

// EMPTY NODES SUBTREES
// ================================================================================================

@ -25,6 +25,17 @@ impl EmptySubtreeRoots {
let pos = 255 - tree_depth + node_depth;
&EMPTY_SUBTREES[pos as usize]
}

/// Returns a sparse Merkle tree [`InnerNode`] with two empty children.
///
/// # Note
/// `node_depth` is the depth of the **parent** to have empty children. That is, `node_depth`
/// and the depth of the returned [`InnerNode`] are the same, and thus the empty hashes are for
/// subtrees of depth `node_depth + 1`.
pub(crate) const fn get_inner_node(tree_depth: u8, node_depth: u8) -> InnerNode {
let &child = Self::entry(tree_depth, node_depth + 1);
InnerNode { left: child, right: child }
}
}

const EMPTY_SUBTREES: [RpoDigest; 256] = [
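Reading the depth note above as code (an illustrative, crate-internal check, assuming `entry` is reachable from the call site): for a depth-64 tree, the inner node returned for node_depth = 7 has children equal to the empty-subtree root at depth 8:

// Illustrative check of the parent/child depth convention described above.
fn demo_empty_inner_node() {
    let node = EmptySubtreeRoots::get_inner_node(64, 7);
    assert_eq!(node.left, *EmptySubtreeRoots::entry(64, 8));
    assert_eq!(node.right, *EmptySubtreeRoots::entry(64, 8));
}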
@ -33,22 +33,22 @@ impl fmt::Display for MerkleError {
DuplicateValuesForKey(key) => write!(f, "multiple values provided for key {key}"),
InvalidIndex { depth, value } => {
write!(f, "the index value {value} is not valid for the depth {depth}")
}
},
InvalidDepth { expected, provided } => {
write!(f, "the provided depth {provided} is not valid for {expected}")
}
},
InvalidSubtreeDepth { subtree_depth, tree_depth } => {
write!(f, "tried inserting a subtree of depth {subtree_depth} into a tree of depth {tree_depth}")
}
},
InvalidPath(_path) => write!(f, "the provided path is not valid"),
InvalidNumEntries(max) => write!(f, "number of entries exceeded the maximum: {max}"),
NodeNotInSet(index) => write!(f, "the node with index ({index}) is not in the set"),
NodeNotInStore(hash, index) => {
write!(f, "the node {hash:?} with index ({index}) is not in the store")
}
},
NumLeavesNotPowerOfTwo(leaves) => {
write!(f, "the leaves count {leaves} is not a power of 2")
}
},
RootNotInStore(root) => write!(f, "the root {:?} is not in the store", root),
SmtLeaf(smt_leaf_error) => write!(f, "smt leaf error: {smt_leaf_error}"),
}
@@ -211,7 +211,7 @@ pub struct InnerNodeIterator<'a> {
    index: usize,
}

impl<'a> Iterator for InnerNodeIterator<'a> {
impl Iterator for InnerNodeIterator<'_> {
    type Item = InnerNodeInfo;

    fn next(&mut self) -> Option<Self::Item> {

@@ -1,6 +1,7 @@
use super::super::RpoDigest;
use alloc::vec::Vec;

use super::super::RpoDigest;

/// Container for the update data of a [super::PartialMmr]
#[derive(Debug)]
pub struct MmrDelta {
@@ -9,6 +9,7 @@ pub enum MmrError {
    InvalidPosition(usize),
    InvalidPeaks,
    InvalidPeak,
    PeakOutOfBounds(usize, usize),
    InvalidUpdate,
    UnknownPeak,
    MerkleError(MerkleError),

@@ -21,11 +22,16 @@ impl Display for MmrError {
            MmrError::InvalidPeaks => write!(fmt, "Invalid peaks count"),
            MmrError::InvalidPeak => {
                write!(fmt, "Peak values does not match merkle path computed root")
            }
            MmrError::InvalidUpdate => write!(fmt, "Invalid mmr update"),
            },
            MmrError::PeakOutOfBounds(peak_idx, peaks_len) => write!(
                fmt,
                "Requested peak index is {} but the number of peaks is {}",
                peak_idx, peaks_len
            ),
            MmrError::InvalidUpdate => write!(fmt, "Invalid Mmr update"),
            MmrError::UnknownPeak => {
                write!(fmt, "Peak not in Mmr")
            }
            },
            MmrError::MerkleError(err) => write!(fmt, "{}", err),
        }
    }
@@ -10,13 +10,14 @@
//! depths, i.e. as part of adding a new element to the forest the trees with same depth are
//! merged, creating a new tree with depth d+1, this process is continued until the property is
//! reestablished.
use alloc::vec::Vec;

use super::{
    super::{InnerNodeInfo, MerklePath},
    bit::TrueBitPositionIterator,
    leaf_to_corresponding_tree, nodes_in_forest, MmrDelta, MmrError, MmrPeaks, MmrProof, Rpo256,
    RpoDigest,
};
use alloc::vec::Vec;

// MMR
// ===============================================================================================
@@ -72,19 +73,36 @@ impl Mmr {
    // FUNCTIONALITY
    // ============================================================================================

    /// Given a leaf position, returns the Merkle path to its corresponding peak. If the position
    /// is greater-or-equal than the tree size an error is returned.
    /// Returns an [MmrProof] for the leaf at the specified position.
    ///
    /// Note: The leaf position is the 0-indexed number corresponding to the order the leaves were
    /// added, this corresponds to the MMR size _prior_ to adding the element. So the 1st element
    /// has position 0, the second position 1, and so on.
    pub fn open(&self, pos: usize, target_forest: usize) -> Result<MmrProof, MmrError> {
    ///
    /// # Errors
    /// Returns an error if the specified leaf position is out of bounds for this MMR.
    pub fn open(&self, pos: usize) -> Result<MmrProof, MmrError> {
        self.open_at(pos, self.forest)
    }

    /// Returns an [MmrProof] for the leaf at the specified position using the state of the MMR
    /// at the specified `forest`.
    ///
    /// Note: The leaf position is the 0-indexed number corresponding to the order the leaves were
    /// added, this corresponds to the MMR size _prior_ to adding the element. So the 1st element
    /// has position 0, the second position 1, and so on.
    ///
    /// # Errors
    /// Returns an error if:
    /// - The specified leaf position is out of bounds for this MMR.
    /// - The specified `forest` value is not valid for this MMR.
    pub fn open_at(&self, pos: usize, forest: usize) -> Result<MmrProof, MmrError> {
        // find the target tree responsible for the MMR position
        let tree_bit =
            leaf_to_corresponding_tree(pos, target_forest).ok_or(MmrError::InvalidPosition(pos))?;
            leaf_to_corresponding_tree(pos, forest).ok_or(MmrError::InvalidPosition(pos))?;

        // isolate the trees before the target
        let forest_before = target_forest & high_bitmask(tree_bit + 1);
        let forest_before = forest & high_bitmask(tree_bit + 1);
        let index_offset = nodes_in_forest(forest_before);

        // update the value position from global to the target tree

@@ -94,7 +112,7 @@ impl Mmr {
        let (_, path) = self.collect_merkle_path_and_value(tree_bit, relative_pos, index_offset);

        Ok(MmrProof {
            forest: target_forest,
            forest,
            position: pos,
            merkle_path: MerklePath::new(path),
        })
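Editor's sketch (not part of the diff) of the split API introduced above; `int_to_node` is the crate's existing test helper, used here purely for illustration:

let mut mmr = Mmr::new();
(0..8u64).for_each(|i| mmr.add(int_to_node(i)));

let current = mmr.open(3).unwrap(); // proof against the live state (8 leaves)
assert_eq!(current.forest, 8);

let historical = mmr.open_at(3, 4).unwrap(); // proof against the 4-leaf snapshot
assert_eq!(historical.forest, 4);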
@@ -145,8 +163,16 @@ impl Mmr {
        self.forest += 1;
    }

    /// Returns an peaks of the MMR for the version specified by `forest`.
    pub fn peaks(&self, forest: usize) -> Result<MmrPeaks, MmrError> {
    /// Returns the current peaks of the MMR.
    pub fn peaks(&self) -> MmrPeaks {
        self.peaks_at(self.forest).expect("failed to get peaks at current forest")
    }

    /// Returns the peaks of the MMR at the state specified by `forest`.
    ///
    /// # Errors
    /// Returns an error if the specified `forest` value is not valid for this MMR.
    pub fn peaks_at(&self, forest: usize) -> Result<MmrPeaks, MmrError> {
        if forest > self.forest {
            return Err(MmrError::InvalidPeaks);
        }
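Editor's sketch (not part of the diff), continuing with the `mmr` from the previous sketch: `peaks()` can no longer fail, while `peaks_at` validates the requested snapshot:

let _current = mmr.peaks(); // infallible for the live state
let _old = mmr.peaks_at(4).unwrap(); // peaks as of forest == 4
assert!(mmr.peaks_at(mmr.forest() + 1).is_err()); // future snapshots are rejected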
@@ -344,7 +370,7 @@ pub struct MmrNodes<'a> {
    index: usize,
}

impl<'a> Iterator for MmrNodes<'a> {
impl Iterator for MmrNodes<'_> {
    type Item = InnerNodeInfo;

    fn next(&mut self) -> Option<Self::Item> {

@@ -377,7 +403,8 @@ impl<'a> Iterator for MmrNodes<'a> {
            // the next parent position is one above the position of the pair
            let parent = self.last_right << 1;

            // the left node has been paired and the current parent yielded, removed it from the forest
            // the left node has been paired and the current parent yielded, removed it from the
            // forest
            self.forest ^= self.last_right;
            if self.forest & parent == 0 {
                // this iteration yielded the left parent node

@@ -10,8 +10,6 @@ mod proof;
#[cfg(test)]
mod tests;

use super::{Felt, Rpo256, RpoDigest, Word};

// REEXPORTS
// ================================================================================================
pub use delta::MmrDelta;

@@ -22,6 +20,8 @@ pub use partial::PartialMmr;
pub use peaks::MmrPeaks;
pub use proof::MmrProof;

use super::{Felt, Rpo256, RpoDigest, Word};

// UTILITIES
// ===============================================================================================

@@ -42,8 +42,8 @@ const fn leaf_to_corresponding_tree(pos: usize, forest: usize) -> Option<u32> {
        // - this means the first tree owns from `0` up to the `2^k_0` first positions, where `k_0`
        //   is the highest true bit position, the second tree from `2^k_0 + 1` up to `2^k_1` where
        //   `k_1` is the second highest bit, so on.
        // - this means the highest bits work as a category marker, and the position is owned by
        //   the first tree which doesn't share a high bit with the position
        // - this means the highest bits work as a category marker, and the position is owned by the
        //   first tree which doesn't share a high bit with the position
        let before = forest & pos;
        let after = forest ^ before;
        let tree = after.ilog2();
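Editor's worked example (not part of the diff) of the bit trick above, for forest = 0b0110 (a 4-leaf tree and a 2-leaf tree) and pos = 5:

// before = forest & pos  = 0b0110 & 0b0101 = 0b0100  (high bits shared with earlier trees)
// after  = forest ^ before = 0b0010                  (trees at or after the target)
// tree   = after.ilog2() = 1                         (pos 5 falls in the 2-leaf tree)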
@@ -539,7 +539,7 @@ pub struct InnerNodeIterator<'a, I: Iterator<Item = (usize, RpoDigest)>> {
    seen_nodes: BTreeSet<InOrderIndex>,
}

impl<'a, I: Iterator<Item = (usize, RpoDigest)>> Iterator for InnerNodeIterator<'a, I> {
impl<I: Iterator<Item = (usize, RpoDigest)>> Iterator for InnerNodeIterator<'_, I> {
    type Item = InnerNodeInfo;

    fn next(&mut self) -> Option<Self::Item> {
@@ -716,18 +716,18 @@ mod tests {
        // build an MMR with 10 nodes (2 peaks) and a partial MMR based on it
        let mut mmr = Mmr::default();
        (0..10).for_each(|i| mmr.add(int_to_node(i)));
        let mut partial_mmr: PartialMmr = mmr.peaks(mmr.forest()).unwrap().into();
        let mut partial_mmr: PartialMmr = mmr.peaks().into();

        // add authentication path for position 1 and 8
        {
            let node = mmr.get(1).unwrap();
            let proof = mmr.open(1, mmr.forest()).unwrap();
            let proof = mmr.open(1).unwrap();
            partial_mmr.track(1, node, &proof.merkle_path).unwrap();
        }

        {
            let node = mmr.get(8).unwrap();
            let proof = mmr.open(8, mmr.forest()).unwrap();
            let proof = mmr.open(8).unwrap();
            partial_mmr.track(8, node, &proof.merkle_path).unwrap();
        }

@@ -740,7 +740,7 @@ mod tests {
        validate_apply_delta(&mmr, &mut partial_mmr);
        {
            let node = mmr.get(12).unwrap();
            let proof = mmr.open(12, mmr.forest()).unwrap();
            let proof = mmr.open(12).unwrap();
            partial_mmr.track(12, node, &proof.merkle_path).unwrap();
            assert!(partial_mmr.track_latest);
        }

@@ -765,7 +765,7 @@ mod tests {
        let nodes_delta = partial.apply(delta).unwrap();

        // new peaks were computed correctly
        assert_eq!(mmr.peaks(mmr.forest()).unwrap(), partial.peaks());
        assert_eq!(mmr.peaks(), partial.peaks());

        let mut expected_nodes = nodes_before;
        for (key, value) in nodes_delta {

@@ -781,7 +781,7 @@ mod tests {
            let index_value: u64 = index.into();
            let pos = index_value / 2;
            let proof1 = partial.open(pos as usize).unwrap().unwrap();
            let proof2 = mmr.open(pos as usize, mmr.forest()).unwrap();
            let proof2 = mmr.open(pos as usize).unwrap();
            assert_eq!(proof1, proof2);
        }
    }

@@ -790,16 +790,16 @@ mod tests {
    fn test_partial_mmr_inner_nodes_iterator() {
        // build the MMR
        let mmr: Mmr = LEAVES.into();
        let first_peak = mmr.peaks(mmr.forest).unwrap().peaks()[0];
        let first_peak = mmr.peaks().peaks()[0];

        // -- test single tree ----------------------------

        // get path and node for position 1
        let node1 = mmr.get(1).unwrap();
        let proof1 = mmr.open(1, mmr.forest()).unwrap();
        let proof1 = mmr.open(1).unwrap();

        // create partial MMR and add authentication path to node at position 1
        let mut partial_mmr: PartialMmr = mmr.peaks(mmr.forest()).unwrap().into();
        let mut partial_mmr: PartialMmr = mmr.peaks().into();
        partial_mmr.track(1, node1, &proof1.merkle_path).unwrap();

        // empty iterator should have no nodes

@@ -817,13 +817,13 @@ mod tests {
        // -- test no duplicates --------------------------

        // build the partial MMR
        let mut partial_mmr: PartialMmr = mmr.peaks(mmr.forest()).unwrap().into();
        let mut partial_mmr: PartialMmr = mmr.peaks().into();

        let node0 = mmr.get(0).unwrap();
        let proof0 = mmr.open(0, mmr.forest()).unwrap();
        let proof0 = mmr.open(0).unwrap();

        let node2 = mmr.get(2).unwrap();
        let proof2 = mmr.open(2, mmr.forest()).unwrap();
        let proof2 = mmr.open(2).unwrap();

        partial_mmr.track(0, node0, &proof0.merkle_path).unwrap();
        partial_mmr.track(1, node1, &proof1.merkle_path).unwrap();

@@ -854,10 +854,10 @@ mod tests {
        // -- test multiple trees -------------------------

        // build the partial MMR
        let mut partial_mmr: PartialMmr = mmr.peaks(mmr.forest()).unwrap().into();
        let mut partial_mmr: PartialMmr = mmr.peaks().into();

        let node5 = mmr.get(5).unwrap();
        let proof5 = mmr.open(5, mmr.forest()).unwrap();
        let proof5 = mmr.open(5).unwrap();

        partial_mmr.track(1, node1, &proof1.merkle_path).unwrap();
        partial_mmr.track(5, node5, &proof5.merkle_path).unwrap();

@@ -869,7 +869,7 @@ mod tests {
        let index1 = NodeIndex::new(2, 1).unwrap();
        let index5 = NodeIndex::new(1, 1).unwrap();

        let second_peak = mmr.peaks(mmr.forest).unwrap().peaks()[1];
        let second_peak = mmr.peaks().peaks()[1];

        let path1 = store.get_path(first_peak, index1).unwrap().path;
        let path5 = store.get_path(second_peak, index5).unwrap().path;

@@ -888,8 +888,7 @@ mod tests {
            mmr.add(el);
            partial_mmr.add(el, false);

            let mmr_peaks = mmr.peaks(mmr.forest()).unwrap();
            assert_eq!(mmr_peaks, partial_mmr.peaks());
            assert_eq!(mmr.peaks(), partial_mmr.peaks());
            assert_eq!(mmr.forest(), partial_mmr.forest());
        }
    }

@@ -905,12 +904,11 @@ mod tests {
            mmr.add(el);
            partial_mmr.add(el, true);

            let mmr_peaks = mmr.peaks(mmr.forest()).unwrap();
            assert_eq!(mmr_peaks, partial_mmr.peaks());
            assert_eq!(mmr.peaks(), partial_mmr.peaks());
            assert_eq!(mmr.forest(), partial_mmr.forest());

            for pos in 0..i {
                let mmr_proof = mmr.open(pos as usize, mmr.forest()).unwrap();
                let mmr_proof = mmr.open(pos as usize).unwrap();
                let partialmmr_proof = partial_mmr.open(pos as usize).unwrap().unwrap();
                assert_eq!(mmr_proof, partialmmr_proof);
            }

@@ -922,8 +920,8 @@ mod tests {
        let mut mmr = Mmr::from((0..7).map(int_to_node));

        // derive a partial Mmr from it which tracks authentication path to leaf 5
        let mut partial_mmr = PartialMmr::from_peaks(mmr.peaks(mmr.forest()).unwrap());
        let path_to_5 = mmr.open(5, mmr.forest()).unwrap().merkle_path;
        let mut partial_mmr = PartialMmr::from_peaks(mmr.peaks());
        let path_to_5 = mmr.open(5).unwrap().merkle_path;
        let leaf_at_5 = mmr.get(5).unwrap();
        partial_mmr.track(5, leaf_at_5, &path_to_5).unwrap();

@@ -933,14 +931,13 @@ mod tests {
        partial_mmr.add(leaf_at_7, false);

        // the openings should be the same
        assert_eq!(mmr.open(5, mmr.forest()).unwrap(), partial_mmr.open(5).unwrap().unwrap());
        assert_eq!(mmr.open(5).unwrap(), partial_mmr.open(5).unwrap().unwrap());
    }

    #[test]
    fn test_partial_mmr_serialization() {
        let mmr = Mmr::from((0..7).map(int_to_node));
        let forest_size = mmr.forest();
        let partial_mmr = PartialMmr::from_peaks(mmr.peaks(forest_size).unwrap());
        let partial_mmr = PartialMmr::from_peaks(mmr.peaks());

        let bytes = partial_mmr.to_bytes();
        let decoded = PartialMmr::read_from_bytes(&bytes).unwrap();
@@ -1,6 +1,7 @@
use super::{super::ZERO, Felt, MmrError, MmrProof, Rpo256, RpoDigest, Word};
use alloc::vec::Vec;

use super::{super::ZERO, Felt, MmrError, MmrProof, Rpo256, RpoDigest, Word};

// MMR PEAKS
// ================================================================================================
@@ -18,12 +19,12 @@ pub struct MmrPeaks {
    ///
    /// Examples:
    ///
    /// - With 5 leaves, the binary `0b101`. The number of set bits is equal the number
    ///   of peaks, in this case there are 2 peaks. The 0-indexed least-significant position of
    ///   the bit determines the number of elements of a tree, so the rightmost tree has `2**0`
    ///   elements and the left most has `2**2`.
    /// - With 12 leaves, the binary is `0b1100`, this case also has 2 peaks, the
    ///   leftmost tree has `2**3=8` elements, and the right most has `2**2=4` elements.
    /// - With 5 leaves, the binary `0b101`. The number of set bits is equal the number of peaks,
    ///   in this case there are 2 peaks. The 0-indexed least-significant position of the bit
    ///   determines the number of elements of a tree, so the rightmost tree has `2**0` elements
    ///   and the left most has `2**2`.
    /// - With 12 leaves, the binary is `0b1100`, this case also has 2 peaks, the leftmost tree has
    ///   `2**3=8` elements, and the right most has `2**2=4` elements.
    num_leaves: usize,

    /// All the peaks of every tree in the MMR forest. The peaks are always ordered by number of
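Editor's note (not part of the diff): the peak count described above is simply the popcount of `num_leaves`:

assert_eq!(5usize.count_ones(), 2);  // 0b101  -> trees of 4 and 1 leaves
assert_eq!(12usize.count_ones(), 2); // 0b1100 -> trees of 8 and 4 leaves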
@@ -68,6 +69,17 @@ impl MmrPeaks {
        &self.peaks
    }

    /// Returns the peak by the provided index.
    ///
    /// # Errors
    /// Returns an error if the provided peak index is greater or equal to the current number of
    /// peaks in the Mmr.
    pub fn get_peak(&self, peak_idx: usize) -> Result<&RpoDigest, MmrError> {
        self.peaks
            .get(peak_idx)
            .ok_or(MmrError::PeakOutOfBounds(peak_idx, self.peaks.len()))
    }

    /// Converts this [MmrPeaks] into its components: number of leaves and a vector of peaks of
    /// the underlying MMR.
    pub fn into_parts(self) -> (usize, Vec<RpoDigest>) {

@@ -83,9 +95,18 @@ impl MmrPeaks {
        Rpo256::hash_elements(&self.flatten_and_pad_peaks())
    }

    pub fn verify(&self, value: RpoDigest, opening: MmrProof) -> bool {
        let root = &self.peaks[opening.peak_index()];
        opening.merkle_path.verify(opening.relative_pos() as u64, value, root)
    /// Verifies the Merkle opening proof.
    ///
    /// # Errors
    /// Returns an error if:
    /// - provided opening proof is invalid.
    /// - Mmr root value computed using the provided leaf value differs from the actual one.
    pub fn verify(&self, value: RpoDigest, opening: MmrProof) -> Result<(), MmrError> {
        let root = self.get_peak(opening.peak_index())?;
        opening
            .merkle_path
            .verify(opening.relative_pos() as u64, value, root)
            .map_err(MmrError::MerkleError)
    }

    /// Flattens and pads the peaks to make hashing inside of the Miden VM easier.
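Editor's sketch (not part of the diff) of the new fallible API; the wrapper name is hypothetical:

fn check_leaf(peaks: &MmrPeaks, leaf: RpoDigest, proof: MmrProof) -> Result<(), MmrError> {
    // now propagates MmrError::PeakOutOfBounds or MmrError::MerkleError instead of a bare `false`
    peaks.verify(leaf, proof)
}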
@@ -94,16 +115,15 @@ impl MmrPeaks {
    /// - Flatten the vector of Words into a vector of Felts.
    /// - Pad the peaks with ZERO to an even number of words, this removes the need to handle RPO
    ///   padding.
    /// - Pad the peaks to a minimum length of 16 words, which reduces the constant cost of
    ///   hashing.
    /// - Pad the peaks to a minimum length of 16 words, which reduces the constant cost of hashing.
    pub fn flatten_and_pad_peaks(&self) -> Vec<Felt> {
        let num_peaks = self.peaks.len();

        // To achieve the padding rules above we calculate the length of the final vector.
        // This is calculated as the number of field elements. Each peak is 4 field elements.
        // The length is calculated as follows:
        // - If there are less than 16 peaks, the data is padded to 16 peaks and as such requires
        //   64 field elements.
        // - If there are less than 16 peaks, the data is padded to 16 peaks and as such requires 64
        //   field elements.
        // - If there are more than 16 peaks and the number of peaks is odd, the data is padded to
        //   an even number of peaks and as such requires `(num_peaks + 1) * 4` field elements.
        // - If there are more than 16 peaks and the number of peaks is even, the data is not padded
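Editor's sketch (not part of the diff) of the padded-length rule spelled out in the comments above; the helper name is hypothetical:

fn padded_len(num_peaks: usize) -> usize {
    if num_peaks < 16 {
        16 * 4              // pad to 16 peaks -> 64 field elements
    } else if num_peaks % 2 == 1 {
        (num_peaks + 1) * 4 // pad odd counts to an even number of peaks
    } else {
        num_peaks * 4       // even counts >= 16 need no padding
    }
}
assert_eq!(padded_len(3), 64);
assert_eq!(padded_len(17), 72);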
@@ -1,3 +1,5 @@
use alloc::vec::Vec;

use super::{
    super::{InnerNodeInfo, Rpo256, RpoDigest},
    bit::TrueBitPositionIterator,

@@ -8,7 +10,6 @@ use crate::{
    merkle::{int_to_node, InOrderIndex, MerklePath, MerkleTree, MmrProof, NodeIndex},
    Felt, Word,
};
use alloc::vec::Vec;

#[test]
fn test_position_equal_or_higher_than_leafs_is_never_contained() {
@@ -138,7 +139,7 @@ fn test_mmr_simple() {
    assert_eq!(mmr.nodes.len(), 1);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

    let acc = mmr.peaks(mmr.forest()).unwrap();
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 1);
    assert_eq!(acc.peaks(), &[postorder[0]]);

@@ -147,7 +148,7 @@ fn test_mmr_simple() {
    assert_eq!(mmr.nodes.len(), 3);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

    let acc = mmr.peaks(mmr.forest()).unwrap();
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 2);
    assert_eq!(acc.peaks(), &[postorder[2]]);

@@ -156,7 +157,7 @@ fn test_mmr_simple() {
    assert_eq!(mmr.nodes.len(), 4);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

    let acc = mmr.peaks(mmr.forest()).unwrap();
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 3);
    assert_eq!(acc.peaks(), &[postorder[2], postorder[3]]);

@@ -165,7 +166,7 @@ fn test_mmr_simple() {
    assert_eq!(mmr.nodes.len(), 7);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

    let acc = mmr.peaks(mmr.forest()).unwrap();
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 4);
    assert_eq!(acc.peaks(), &[postorder[6]]);

@@ -174,7 +175,7 @@ fn test_mmr_simple() {
    assert_eq!(mmr.nodes.len(), 8);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

    let acc = mmr.peaks(mmr.forest()).unwrap();
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 5);
    assert_eq!(acc.peaks(), &[postorder[6], postorder[7]]);

@@ -183,7 +184,7 @@ fn test_mmr_simple() {
    assert_eq!(mmr.nodes.len(), 10);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

    let acc = mmr.peaks(mmr.forest()).unwrap();
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 6);
    assert_eq!(acc.peaks(), &[postorder[6], postorder[9]]);

@@ -192,7 +193,7 @@ fn test_mmr_simple() {
    assert_eq!(mmr.nodes.len(), 11);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);

    let acc = mmr.peaks(mmr.forest()).unwrap();
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 7);
    assert_eq!(acc.peaks(), &[postorder[6], postorder[9], postorder[10]]);
}
@@ -204,97 +205,73 @@ fn test_mmr_open() {
    let h23 = merge(LEAVES[2], LEAVES[3]);

    // node at pos 7 is the root
    assert!(
        mmr.open(7, mmr.forest()).is_err(),
        "Element 7 is not in the tree, result should be None"
    );
    assert!(mmr.open(7).is_err(), "Element 7 is not in the tree, result should be None");

    // node at pos 6 is the root
    let empty: MerklePath = MerklePath::new(vec![]);
    let opening = mmr
        .open(6, mmr.forest())
        .open(6)
        .expect("Element 6 is contained in the tree, expected an opening result.");
    assert_eq!(opening.merkle_path, empty);
    assert_eq!(opening.forest, mmr.forest);
    assert_eq!(opening.position, 6);
    assert!(
        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[6], opening),
        "MmrProof should be valid for the current accumulator."
    );
    mmr.peaks().verify(LEAVES[6], opening).unwrap();

    // nodes 4,5 are depth 1
    let root_to_path = MerklePath::new(vec![LEAVES[4]]);
    let opening = mmr
        .open(5, mmr.forest())
        .open(5)
        .expect("Element 5 is contained in the tree, expected an opening result.");
    assert_eq!(opening.merkle_path, root_to_path);
    assert_eq!(opening.forest, mmr.forest);
    assert_eq!(opening.position, 5);
    assert!(
        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[5], opening),
        "MmrProof should be valid for the current accumulator."
    );
    mmr.peaks().verify(LEAVES[5], opening).unwrap();

    let root_to_path = MerklePath::new(vec![LEAVES[5]]);
    let opening = mmr
        .open(4, mmr.forest())
        .open(4)
        .expect("Element 4 is contained in the tree, expected an opening result.");
    assert_eq!(opening.merkle_path, root_to_path);
    assert_eq!(opening.forest, mmr.forest);
    assert_eq!(opening.position, 4);
    assert!(
        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[4], opening),
        "MmrProof should be valid for the current accumulator."
    );
    mmr.peaks().verify(LEAVES[4], opening).unwrap();

    // nodes 0,1,2,3 are detph 2
    let root_to_path = MerklePath::new(vec![LEAVES[2], h01]);
    let opening = mmr
        .open(3, mmr.forest())
        .open(3)
        .expect("Element 3 is contained in the tree, expected an opening result.");
    assert_eq!(opening.merkle_path, root_to_path);
    assert_eq!(opening.forest, mmr.forest);
    assert_eq!(opening.position, 3);
    assert!(
        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[3], opening),
        "MmrProof should be valid for the current accumulator."
    );
    mmr.peaks().verify(LEAVES[3], opening).unwrap();

    let root_to_path = MerklePath::new(vec![LEAVES[3], h01]);
    let opening = mmr
        .open(2, mmr.forest())
        .open(2)
        .expect("Element 2 is contained in the tree, expected an opening result.");
    assert_eq!(opening.merkle_path, root_to_path);
    assert_eq!(opening.forest, mmr.forest);
    assert_eq!(opening.position, 2);
    assert!(
        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[2], opening),
        "MmrProof should be valid for the current accumulator."
    );
    mmr.peaks().verify(LEAVES[2], opening).unwrap();

    let root_to_path = MerklePath::new(vec![LEAVES[0], h23]);
    let opening = mmr
        .open(1, mmr.forest())
        .open(1)
        .expect("Element 1 is contained in the tree, expected an opening result.");
    assert_eq!(opening.merkle_path, root_to_path);
    assert_eq!(opening.forest, mmr.forest);
    assert_eq!(opening.position, 1);
    assert!(
        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[1], opening),
        "MmrProof should be valid for the current accumulator."
    );
    mmr.peaks().verify(LEAVES[1], opening).unwrap();

    let root_to_path = MerklePath::new(vec![LEAVES[1], h23]);
    let opening = mmr
        .open(0, mmr.forest())
        .open(0)
        .expect("Element 0 is contained in the tree, expected an opening result.");
    assert_eq!(opening.merkle_path, root_to_path);
    assert_eq!(opening.forest, mmr.forest);
    assert_eq!(opening.position, 0);
    assert!(
        mmr.peaks(mmr.forest()).unwrap().verify(LEAVES[0], opening),
        "MmrProof should be valid for the current accumulator."
    );
    mmr.peaks().verify(LEAVES[0], opening).unwrap();
}

#[test]
@@ -308,7 +285,7 @@ fn test_mmr_open_older_version() {
    // merkle path of a node is empty if there are no elements to pair with it
    for pos in (0..mmr.forest()).filter(is_even) {
        let forest = pos + 1;
        let proof = mmr.open(pos, forest).unwrap();
        let proof = mmr.open_at(pos, forest).unwrap();
        assert_eq!(proof.forest, forest);
        assert_eq!(proof.merkle_path.nodes(), []);
        assert_eq!(proof.position, pos);

@@ -320,7 +297,7 @@ fn test_mmr_open_older_version() {
        for pos in 0..4 {
            let idx = NodeIndex::new(2, pos).unwrap();
            let path = mtree.get_path(idx).unwrap();
            let proof = mmr.open(pos as usize, forest).unwrap();
            let proof = mmr.open_at(pos as usize, forest).unwrap();
            assert_eq!(path, proof.merkle_path);
        }
    }

@@ -331,7 +308,7 @@ fn test_mmr_open_older_version() {
            let path = mtree.get_path(idx).unwrap();
            // account for the bigger tree with 4 elements
            let mmr_pos = (pos + 4) as usize;
            let proof = mmr.open(mmr_pos, forest).unwrap();
            let proof = mmr.open_at(mmr_pos, forest).unwrap();
            assert_eq!(path, proof.merkle_path);
        }
    }
@@ -357,49 +334,49 @@ fn test_mmr_open_eight() {
    let root = mtree.root();

    let position = 0;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

    let position = 1;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

    let position = 2;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

    let position = 3;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

    let position = 4;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

    let position = 5;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

    let position = 6;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);

    let position = 7;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(position as u64, leaves[position]).unwrap(), root);
@@ -415,47 +392,47 @@ fn test_mmr_open_seven() {
    let mmr: Mmr = LEAVES.into();

    let position = 0;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath =
        mtree1.get_path(NodeIndex::new(2, position as u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(0, LEAVES[0]).unwrap(), mtree1.root());

    let position = 1;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath =
        mtree1.get_path(NodeIndex::new(2, position as u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(1, LEAVES[1]).unwrap(), mtree1.root());

    let position = 2;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath =
        mtree1.get_path(NodeIndex::new(2, position as u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(2, LEAVES[2]).unwrap(), mtree1.root());

    let position = 3;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath =
        mtree1.get_path(NodeIndex::new(2, position as u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(3, LEAVES[3]).unwrap(), mtree1.root());

    let position = 4;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath = mtree2.get_path(NodeIndex::new(1, 0u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(0, LEAVES[4]).unwrap(), mtree2.root());

    let position = 5;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath = mtree2.get_path(NodeIndex::new(1, 1u64).unwrap()).unwrap();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(1, LEAVES[5]).unwrap(), mtree2.root());

    let position = 6;
    let proof = mmr.open(position, mmr.forest()).unwrap();
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath = [].as_ref().into();
    assert_eq!(proof, MmrProof { forest, position, merkle_path });
    assert_eq!(proof.merkle_path.compute_root(0, LEAVES[6]).unwrap(), LEAVES[6]);
@@ -479,7 +456,7 @@ fn test_mmr_invariants() {
    let mut mmr = Mmr::new();
    for v in 1..=1028 {
        mmr.add(int_to_node(v));
        let accumulator = mmr.peaks(mmr.forest()).unwrap();
        let accumulator = mmr.peaks();
        assert_eq!(v as usize, mmr.forest(), "MMR leaf count must increase by one on every add");
        assert_eq!(
            v as usize,
@@ -565,37 +542,37 @@ fn test_mmr_peaks() {
    let mmr: Mmr = LEAVES.into();

    let forest = 0b0001;
    let acc = mmr.peaks(forest).unwrap();
    let acc = mmr.peaks_at(forest).unwrap();
    assert_eq!(acc.num_leaves(), forest);
    assert_eq!(acc.peaks(), &[mmr.nodes[0]]);

    let forest = 0b0010;
    let acc = mmr.peaks(forest).unwrap();
    let acc = mmr.peaks_at(forest).unwrap();
    assert_eq!(acc.num_leaves(), forest);
    assert_eq!(acc.peaks(), &[mmr.nodes[2]]);

    let forest = 0b0011;
    let acc = mmr.peaks(forest).unwrap();
    let acc = mmr.peaks_at(forest).unwrap();
    assert_eq!(acc.num_leaves(), forest);
    assert_eq!(acc.peaks(), &[mmr.nodes[2], mmr.nodes[3]]);

    let forest = 0b0100;
    let acc = mmr.peaks(forest).unwrap();
    let acc = mmr.peaks_at(forest).unwrap();
    assert_eq!(acc.num_leaves(), forest);
    assert_eq!(acc.peaks(), &[mmr.nodes[6]]);

    let forest = 0b0101;
    let acc = mmr.peaks(forest).unwrap();
    let acc = mmr.peaks_at(forest).unwrap();
    assert_eq!(acc.num_leaves(), forest);
    assert_eq!(acc.peaks(), &[mmr.nodes[6], mmr.nodes[7]]);

    let forest = 0b0110;
    let acc = mmr.peaks(forest).unwrap();
    let acc = mmr.peaks_at(forest).unwrap();
    assert_eq!(acc.num_leaves(), forest);
    assert_eq!(acc.peaks(), &[mmr.nodes[6], mmr.nodes[9]]);

    let forest = 0b0111;
    let acc = mmr.peaks(forest).unwrap();
    let acc = mmr.peaks_at(forest).unwrap();
    assert_eq!(acc.num_leaves(), forest);
    assert_eq!(acc.peaks(), &[mmr.nodes[6], mmr.nodes[9], mmr.nodes[10]]);
}
@@ -603,7 +580,7 @@ fn test_mmr_peaks() {
#[test]
fn test_mmr_hash_peaks() {
    let mmr: Mmr = LEAVES.into();
    let peaks = mmr.peaks(mmr.forest()).unwrap();
    let peaks = mmr.peaks();

    let first_peak = Rpo256::merge(&[
        Rpo256::merge(&[LEAVES[0], LEAVES[1]]),

@@ -657,7 +634,7 @@ fn test_mmr_peaks_hash_odd() {
#[test]
fn test_mmr_delta() {
    let mmr: Mmr = LEAVES.into();
    let acc = mmr.peaks(mmr.forest()).unwrap();
    let acc = mmr.peaks();

    // original_forest can't have more elements
    assert!(

@@ -757,7 +734,7 @@ fn test_mmr_delta_old_forest() {
#[test]
fn test_partial_mmr_simple() {
    let mmr: Mmr = LEAVES.into();
    let peaks = mmr.peaks(mmr.forest()).unwrap();
    let peaks = mmr.peaks();
    let mut partial: PartialMmr = peaks.clone().into();

    // check initial state of the partial mmr

@@ -768,7 +745,7 @@ fn test_partial_mmr_simple() {
    assert_eq!(partial.nodes.len(), 0);

    // check state after adding tracking one element
    let proof1 = mmr.open(0, mmr.forest()).unwrap();
    let proof1 = mmr.open(0).unwrap();
    let el1 = mmr.get(proof1.position).unwrap();
    partial.track(proof1.position, el1, &proof1.merkle_path).unwrap();

@@ -780,7 +757,7 @@ fn test_partial_mmr_simple() {
    let idx = idx.parent();
    assert_eq!(partial.nodes[&idx.sibling()], proof1.merkle_path[1]);

    let proof2 = mmr.open(1, mmr.forest()).unwrap();
    let proof2 = mmr.open(1).unwrap();
    let el2 = mmr.get(proof2.position).unwrap();
    partial.track(proof2.position, el2, &proof2.merkle_path).unwrap();

@@ -798,9 +775,9 @@ fn test_partial_mmr_update_single() {
    let mut full = Mmr::new();
    let zero = int_to_node(0);
    full.add(zero);
    let mut partial: PartialMmr = full.peaks(full.forest()).unwrap().into();
    let mut partial: PartialMmr = full.peaks().into();

    let proof = full.open(0, full.forest()).unwrap();
    let proof = full.open(0).unwrap();
    partial.track(proof.position, zero, &proof.merkle_path).unwrap();

    for i in 1..100 {

@@ -810,9 +787,9 @@ fn test_partial_mmr_update_single() {
        partial.apply(delta).unwrap();

        assert_eq!(partial.forest(), full.forest());
        assert_eq!(partial.peaks(), full.peaks(full.forest()).unwrap());
        assert_eq!(partial.peaks(), full.peaks());

        let proof1 = full.open(i as usize, full.forest()).unwrap();
        let proof1 = full.open(i as usize).unwrap();
        partial.track(proof1.position, node, &proof1.merkle_path).unwrap();
        let proof2 = partial.open(proof1.position).unwrap().unwrap();
        assert_eq!(proof1.merkle_path, proof2.merkle_path);

@@ -822,7 +799,7 @@ fn test_partial_mmr_update_single() {
#[test]
fn test_mmr_add_invalid_odd_leaf() {
    let mmr: Mmr = LEAVES.into();
    let acc = mmr.peaks(mmr.forest()).unwrap();
    let acc = mmr.peaks();
    let mut partial: PartialMmr = acc.clone().into();

    let empty = MerklePath::new(Vec::new());
@@ -837,6 +814,39 @@ fn test_mmr_add_invalid_odd_leaf() {
    assert!(result.is_ok());
}

/// Tests that a proof whose peak count exceeds the peak count of the MMR returns an error.
///
/// Here we manipulate the proof to return a peak index of 1 while the MMR only has 1 peak (with
/// index 0).
#[test]
#[should_panic]
fn test_mmr_proof_num_peaks_exceeds_current_num_peaks() {
    let mmr: Mmr = LEAVES[0..4].iter().cloned().into();
    let mut proof = mmr.open(3).unwrap();
    proof.forest = 5;
    proof.position = 4;
    mmr.peaks().verify(LEAVES[3], proof).unwrap();
}

/// Tests that a proof whose peak count exceeds the peak count of the MMR returns an error.
///
/// We create an MmrProof for a leaf whose peak index to verify against is 1.
/// Then we add another leaf which results in an Mmr with just one peak due to trees
/// being merged. If we try to use the old proof against the new Mmr, we should get an error.
#[test]
#[should_panic]
fn test_mmr_old_proof_num_peaks_exceeds_current_num_peaks() {
    let leaves_len = 3;
    let mut mmr = Mmr::from(LEAVES[0..leaves_len].iter().cloned());

    let leaf_idx = leaves_len - 1;
    let proof = mmr.open(leaf_idx).unwrap();
    assert!(mmr.peaks().verify(LEAVES[leaf_idx], proof.clone()).is_ok());

    mmr.add(LEAVES[leaves_len]);
    mmr.peaks().verify(LEAVES[leaf_idx], proof).unwrap();
}

mod property_tests {
    use proptest::prelude::*;
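Editor's note (not part of the diff): the two tests above assert failure by unwrapping inside `#[should_panic]`; an equivalent panic-free formulation would be:

assert!(mmr.peaks().verify(LEAVES[3], proof).is_err());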
@@ -22,8 +22,8 @@ pub use path::{MerklePath, RootPath, ValuePath};

mod smt;
pub use smt::{
    LeafIndex, SimpleSmt, Smt, SmtLeaf, SmtLeafError, SmtProof, SmtProofError, SMT_DEPTH,
    SMT_MAX_DEPTH, SMT_MIN_DEPTH,
    LeafIndex, MutationSet, SimpleSmt, Smt, SmtLeaf, SmtLeafError, SmtProof, SmtProofError,
    SMT_DEPTH, SMT_MAX_DEPTH, SMT_MIN_DEPTH,
};

mod mmr;
@@ -1,3 +1,5 @@
use alloc::{collections::BTreeMap, vec::Vec};

use super::{
    super::{
        digests_to_words, int_to_node, DefaultMerkleStore as MerkleStore, MerkleTree, NodeIndex,

@@ -5,7 +7,6 @@ use super::{
    },
    Deserializable, InnerNodeInfo, RpoDigest, Serializable, ValuePath,
};
use alloc::{collections::BTreeMap, vec::Vec};

// TEST DATA
// ================================================================================================
@@ -294,7 +295,8 @@ fn leaves() {
    assert!(expected_leaves.eq(pmt.leaves()));
}

/// Checks that nodes of the PMT returned by `inner_nodes()` function are equal to the expected ones.
/// Checks that nodes of the PMT returned by `inner_nodes()` function are equal to the expected
/// ones.
#[test]
fn test_inner_node_iterator() {
    let mt = MerkleTree::new(digests_to_words(&VALUES8)).unwrap();
@@ -54,12 +54,17 @@ impl MerklePath {

    /// Verifies the Merkle opening proof towards the provided root.
    ///
    /// Returns `true` if `node` exists at `index` in a Merkle tree with `root`.
    pub fn verify(&self, index: u64, node: RpoDigest, root: &RpoDigest) -> bool {
        match self.compute_root(index, node) {
            Ok(computed_root) => root == &computed_root,
            Err(_) => false,
    /// # Errors
    /// Returns an error if:
    /// - provided node index is invalid.
    /// - root calculated during the verification differs from the provided one.
    pub fn verify(&self, index: u64, node: RpoDigest, root: &RpoDigest) -> Result<(), MerkleError> {
        let computed_root = self.compute_root(index, node)?;
        if &computed_root != root {
            return Err(MerkleError::ConflictingRoots(vec![computed_root, *root]));
        }

        Ok(())
    }

    /// Returns an iterator over every inner node of this [MerklePath].

@@ -143,7 +148,7 @@ pub struct InnerNodeIterator<'a> {
    value: RpoDigest,
}

impl<'a> Iterator for InnerNodeIterator<'a> {
impl Iterator for InnerNodeIterator<'_> {
    type Item = InnerNodeInfo;

    fn next(&mut self) -> Option<Self::Item> {
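Editor's sketch (not part of the diff): migrating callers from the old boolean API to the new Result-based `verify`; the wrapper recovers the old behavior and its name is hypothetical:

fn on_path(path: &MerklePath, index: u64, node: RpoDigest, root: &RpoDigest) -> bool {
    match path.verify(index, node, root) {
        Ok(()) => true,
        Err(MerkleError::ConflictingRoots(_)) => false, // hashes to a different root
        Err(_) => false,                                // e.g. the index was invalid
    }
}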
@@ -37,17 +37,17 @@ impl fmt::Display for SmtLeafError {
        match self {
            InvalidNumEntriesForMultiple(num_entries) => {
                write!(f, "Multiple leaf requires 2 or more entries. Got: {num_entries}")
            }
            },
            InconsistentKeys { entries, key_1, key_2 } => {
                write!(f, "Multiple leaf requires all keys to map to the same leaf index. Offending keys: {key_1} and {key_2}. Entries: {entries:?}.")
            }
            },
            SingleKeyInconsistentWithLeafIndex { key, leaf_index } => {
                write!(
                    f,
                    "Single key in leaf inconsistent with leaf index. Key: {key}, leaf index: {}",
                    leaf_index.value()
                )
            }
            },
            MultipleKeysInconsistentWithLeafIndex {
                leaf_index_from_keys,
                leaf_index_supplied,

@@ -58,7 +58,7 @@ impl fmt::Display for SmtLeafError {
                    leaf_index_from_keys.value(),
                    leaf_index_supplied.value()
                )
            }
            },
        }
    }
}

@@ -80,7 +80,7 @@ impl fmt::Display for SmtProofError {
        match self {
            InvalidPathLength(path_length) => {
                write!(f, "Invalid Merkle path length. Expected {SMT_DEPTH}, got {path_length}")
            }
            },
        }
    }
}
@@ -20,8 +20,8 @@ impl SmtLeaf {
    ///
    /// # Errors
    ///   - Returns an error if 2 keys in `entries` map to a different leaf index
    ///   - Returns an error if 1 or more keys in `entries` map to a leaf index
    ///     different from `leaf_index`
    ///   - Returns an error if 1 or more keys in `entries` map to a leaf index different from
    ///     `leaf_index`
    pub fn new(
        entries: Vec<(RpoDigest, Word)>,
        leaf_index: LeafIndex<SMT_DEPTH>,

@@ -39,7 +39,7 @@ impl SmtLeaf {
                }

                Ok(Self::new_single(key, value))
            }
            },
            _ => {
                let leaf = Self::new_multiple(entries)?;

@@ -53,7 +53,7 @@ impl SmtLeaf {
                } else {
                    Ok(leaf)
                }
            }
            },
        }
    }

@@ -118,7 +118,7 @@ impl SmtLeaf {
                // Note: All keys are guaranteed to have the same leaf index
                let (first_key, _) = entries[0];
                first_key.into()
            }
            },
        }
    }

@@ -129,7 +129,7 @@ impl SmtLeaf {
            SmtLeaf::Single(_) => 1,
            SmtLeaf::Multiple(entries) => {
                entries.len().try_into().expect("shouldn't have more than 2^64 entries")
            }
            },
        }
    }

@@ -141,7 +141,7 @@ impl SmtLeaf {
            SmtLeaf::Multiple(kvs) => {
                let elements: Vec<Felt> = kvs.iter().copied().flat_map(kv_to_elements).collect();
                Rpo256::hash_elements(&elements)
            }
            },
        }
    }

@@ -182,7 +182,8 @@ impl SmtLeaf {
    // HELPERS
    // ---------------------------------------------------------------------------------------------

    /// Returns the value associated with `key` in the leaf, or `None` if `key` maps to another leaf.
    /// Returns the value associated with `key` in the leaf, or `None` if `key` maps to another
    /// leaf.
    pub(super) fn get_value(&self, key: &RpoDigest) -> Option<Word> {
        // Ensure that `key` maps to this leaf
        if self.index() != key.into() {

@@ -197,7 +198,7 @@ impl SmtLeaf {
                } else {
                    Some(EMPTY_WORD)
                }
            }
            },
            SmtLeaf::Multiple(kv_pairs) => {
                for (key_in_leaf, value_in_leaf) in kv_pairs {
                    if key == key_in_leaf {

@@ -206,7 +207,7 @@ impl SmtLeaf {
                }

                Some(EMPTY_WORD)
            }
            },
        }
    }

@@ -219,7 +220,7 @@ impl SmtLeaf {
            SmtLeaf::Empty(_) => {
                *self = SmtLeaf::new_single(key, value);
                None
            }
            },
            SmtLeaf::Single(kv_pair) => {
                if kv_pair.0 == key {
                    // the key is already in this leaf. Update the value and return the previous

@@ -237,7 +238,7 @@ impl SmtLeaf {

                    None
                }
            }
            },
            SmtLeaf::Multiple(kv_pairs) => {
                match kv_pairs.binary_search_by(|kv_pair| cmp_keys(kv_pair.0, key)) {
                    Ok(pos) => {

@@ -245,14 +246,14 @@ impl SmtLeaf {
                        kv_pairs[pos].1 = value;

                        Some(old_value)
                    }
                    },
                    Err(pos) => {
                        kv_pairs.insert(pos, (key, value));

                        None
                    }
                    },
                }
            }
            },
        }
    }

@@ -277,7 +278,7 @@ impl SmtLeaf {
                    // another key is stored at leaf; nothing to update
                    (None, false)
                }
            }
            },
            SmtLeaf::Multiple(kv_pairs) => {
                match kv_pairs.binary_search_by(|kv_pair| cmp_keys(kv_pair.0, key)) {
                    Ok(pos) => {

@@ -292,13 +293,13 @@ impl SmtLeaf {
                        }

                        (Some(old_value), false)
                    }
                    },
                    Err(_) => {
                        // other keys are stored at leaf; nothing to update
                        (None, false)
                    }
                    },
                }
            }
            },
        }
    }
}

@@ -349,7 +350,7 @@ impl Deserializable for SmtLeaf {
// ================================================================================================

/// Converts a key-value tuple to an iterator of `Felt`s
fn kv_to_elements((key, value): (RpoDigest, Word)) -> impl Iterator<Item = Felt> {
pub(crate) fn kv_to_elements((key, value): (RpoDigest, Word)) -> impl Iterator<Item = Felt> {
    let key_elements = key.into_iter();
    let value_elements = value.into_iter();

@@ -358,7 +359,7 @@ fn kv_to_elements((key, value): (RpoDigest, Word)) -> impl Iterator<Item = Felt>

/// Compares two keys, compared element-by-element using their integer representations starting with
/// the most significant element.
fn cmp_keys(key_1: RpoDigest, key_2: RpoDigest) -> Ordering {
pub(crate) fn cmp_keys(key_1: RpoDigest, key_2: RpoDigest) -> Ordering {
    for (v1, v2) in key_1.iter().zip(key_2.iter()).rev() {
        let v1 = v1.as_int();
        let v2 = v2.as_int();
@@ -1,13 +1,14 @@
use super::{
    EmptySubtreeRoots, Felt, InnerNode, InnerNodeInfo, LeafIndex, MerkleError, MerklePath,
    NodeIndex, Rpo256, RpoDigest, SparseMerkleTree, Word, EMPTY_WORD,
};
use alloc::{
    collections::{BTreeMap, BTreeSet},
    string::ToString,
    vec::Vec,
};

use super::{
    EmptySubtreeRoots, Felt, InnerNode, InnerNodeInfo, LeafIndex, MerkleError, MerklePath,
    MutationSet, NodeIndex, Rpo256, RpoDigest, SparseMerkleTree, Word, EMPTY_WORD,
};

mod error;
pub use error::{SmtLeafError, SmtProofError};
@@ -32,8 +33,8 @@ pub const SMT_DEPTH: u8 = 64;
/// Sparse Merkle tree mapping 256-bit keys to 256-bit values. Both keys and values are represented
/// by 4 field elements.
///
/// All leaves sit at depth 64. The most significant element of the key is used to identify the leaf to
/// which the key maps.
/// All leaves sit at depth 64. The most significant element of the key is used to identify the leaf
/// to which the key maps.
///
/// A leaf is either empty, or holds one or more key-value pairs. An empty leaf hashes to the empty
/// word. Otherwise, a leaf hashes to the hash of its key-value pairs, ordered by key first, value
@@ -120,12 +121,7 @@ impl Smt {

    /// Returns the value associated with `key`
    pub fn get_value(&self, key: &RpoDigest) -> Word {
        let leaf_pos = LeafIndex::<SMT_DEPTH>::from(*key).value();

        match self.leaves.get(&leaf_pos) {
            Some(leaf) => leaf.get_value(key).unwrap_or_default(),
            None => EMPTY_WORD,
        }
        <Self as SparseMerkleTree<SMT_DEPTH>>::get_value(self, key)
    }

    /// Returns an opening of the leaf associated with `key`. Conceptually, an opening is a Merkle

@@ -134,6 +130,12 @@ impl Smt {
        <Self as SparseMerkleTree<SMT_DEPTH>>::open(self, key)
    }

    /// Returns a boolean value indicating whether the SMT is empty.
    pub fn is_empty(&self) -> bool {
        debug_assert_eq!(self.leaves.is_empty(), self.root == Self::EMPTY_ROOT);
        self.root == Self::EMPTY_ROOT
    }

    // ITERATORS
    // --------------------------------------------------------------------------------------------
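Editor's sketch (not part of the diff): the new `is_empty` agrees with the canonical empty root exported elsewhere in the crate:

let smt = Smt::new();
assert!(smt.is_empty());
assert_eq!(smt.root(), *EmptySubtreeRoots::entry(SMT_DEPTH, 0));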
@@ -171,6 +173,47 @@ impl Smt {
        <Self as SparseMerkleTree<SMT_DEPTH>>::insert(self, key, value)
    }

    /// Computes what changes are necessary to insert the specified key-value pairs into this Merkle
    /// tree, allowing for validation before applying those changes.
    ///
    /// This method returns a [`MutationSet`], which contains all the information for inserting
    /// `kv_pairs` into this Merkle tree already calculated, including the new root hash, which can
    /// be queried with [`MutationSet::root()`]. Once a mutation set is returned,
    /// [`Smt::apply_mutations()`] can be called in order to commit these changes to the Merkle
    /// tree, or [`drop()`] to discard them.
    ///
    /// # Example
    /// ```
    /// # use miden_crypto::{hash::rpo::RpoDigest, Felt, Word};
    /// # use miden_crypto::merkle::{Smt, EmptySubtreeRoots, SMT_DEPTH};
    /// let mut smt = Smt::new();
    /// let pair = (RpoDigest::default(), Word::default());
    /// let mutations = smt.compute_mutations(vec![pair]);
    /// assert_eq!(mutations.root(), *EmptySubtreeRoots::entry(SMT_DEPTH, 0));
    /// smt.apply_mutations(mutations);
    /// assert_eq!(smt.root(), *EmptySubtreeRoots::entry(SMT_DEPTH, 0));
    /// ```
    pub fn compute_mutations(
        &self,
        kv_pairs: impl IntoIterator<Item = (RpoDigest, Word)>,
    ) -> MutationSet<SMT_DEPTH, RpoDigest, Word> {
        <Self as SparseMerkleTree<SMT_DEPTH>>::compute_mutations(self, kv_pairs)
    }

    /// Apply the prospective mutations computed with [`Smt::compute_mutations()`] to this tree.
    ///
    /// # Errors
    /// If `mutations` was computed on a tree with a different root than this one, returns
    /// [`MerkleError::ConflictingRoots`] with a two-item [`Vec`]. The first item is the root hash
    /// the `mutations` were computed against, and the second item is the actual current root of
    /// this tree.
    pub fn apply_mutations(
        &mut self,
        mutations: MutationSet<SMT_DEPTH, RpoDigest, Word>,
    ) -> Result<(), MerkleError> {
        <Self as SparseMerkleTree<SMT_DEPTH>>::apply_mutations(self, mutations)
    }

    // HELPERS
    // --------------------------------------------------------------------------------------------
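Editor's sketch (not part of the diff) of the validate-then-commit pattern the new API enables; `kv_pairs` and `expected_root` are hypothetical inputs (e.g. a root taken from a block header):

fn insert_batch_checked(
    smt: &mut Smt,
    kv_pairs: Vec<(RpoDigest, Word)>,
    expected_root: RpoDigest,
) -> Result<(), MerkleError> {
    let mutations = smt.compute_mutations(kv_pairs);
    if mutations.root() == expected_root {
        smt.apply_mutations(mutations) // commit the precomputed changes
    } else {
        drop(mutations); // discard; the tree was never touched
        Ok(())
    }
}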
@@ -187,7 +230,7 @@ impl Smt {
                self.leaves.insert(leaf_index.value(), SmtLeaf::Single((key, value)));

                None
            }
            },
        }
    }
@ -215,6 +258,7 @@ impl SparseMerkleTree<SMT_DEPTH> for Smt {
|
|||
type Opening = SmtProof;
|
||||
|
||||
const EMPTY_VALUE: Self::Value = EMPTY_WORD;
|
||||
const EMPTY_ROOT: RpoDigest = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
|
||||
|
||||
fn root(&self) -> RpoDigest {
|
||||
self.root
|
||||
|
@ -225,11 +269,10 @@ impl SparseMerkleTree<SMT_DEPTH> for Smt {
|
|||
}
|
||||
|
||||
fn get_inner_node(&self, index: NodeIndex) -> InnerNode {
|
||||
self.inner_nodes.get(&index).cloned().unwrap_or_else(|| {
|
||||
let node = EmptySubtreeRoots::entry(SMT_DEPTH, index.depth() + 1);
|
||||
|
||||
InnerNode { left: *node, right: *node }
|
||||
})
|
||||
self.inner_nodes
|
||||
.get(&index)
|
||||
.cloned()
|
||||
.unwrap_or_else(|| EmptySubtreeRoots::get_inner_node(SMT_DEPTH, index.depth()))
|
||||
}
|
||||
|
||||
fn insert_inner_node(&mut self, index: NodeIndex, inner_node: InnerNode) {
|
||||
|
@ -249,6 +292,15 @@ impl SparseMerkleTree<SMT_DEPTH> for Smt {
|
|||
}
|
||||
}
|
||||
|
||||
fn get_value(&self, key: &Self::Key) -> Self::Value {
|
||||
let leaf_pos = LeafIndex::<SMT_DEPTH>::from(*key).value();
|
||||
|
||||
match self.leaves.get(&leaf_pos) {
|
||||
Some(leaf) => leaf.get_value(key).unwrap_or_default(),
|
||||
None => EMPTY_WORD,
|
||||
}
|
||||
}
|
||||
|
||||
fn get_leaf(&self, key: &RpoDigest) -> Self::Leaf {
|
||||
let leaf_pos = LeafIndex::<SMT_DEPTH>::from(*key).value();
|
||||
|
||||
|
@ -262,6 +314,28 @@ impl SparseMerkleTree<SMT_DEPTH> for Smt {
|
|||
leaf.hash()
|
||||
}
|
||||
|
||||
fn construct_prospective_leaf(
|
||||
&self,
|
||||
mut existing_leaf: SmtLeaf,
|
||||
key: &RpoDigest,
|
||||
value: &Word,
|
||||
) -> SmtLeaf {
|
||||
debug_assert_eq!(existing_leaf.index(), Self::key_to_leaf_index(key));
|
||||
|
||||
match existing_leaf {
|
||||
SmtLeaf::Empty(_) => SmtLeaf::new_single(*key, *value),
|
||||
_ => {
|
||||
if *value != EMPTY_WORD {
|
||||
existing_leaf.insert(*key, *value);
|
||||
} else {
|
||||
existing_leaf.remove(*key);
|
||||
}
|
||||
|
||||
existing_leaf
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn key_to_leaf_index(key: &RpoDigest) -> LeafIndex<SMT_DEPTH> {
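        // Only `key[3]`, the most significant felt, selects the leaf index, so keys that
        // differ only in felts 0..=2 map to the same leaf (see the multi-key tests below).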
        let most_significant_felt = key[3];
        LeafIndex::new_max_depth(most_significant_felt.as_int())

@@ -1,6 +1,7 @@
use alloc::string::ToString;

use super::{MerklePath, RpoDigest, SmtLeaf, SmtProofError, Word, SMT_DEPTH};
use crate::utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable};
use alloc::string::ToString;

/// A proof which can be used to assert membership (or non-membership) of key-value pairs in a
/// [`super::Smt`].
@@ -57,7 +58,7 @@ impl SmtProof {

                // make sure the Merkle path resolves to the correct root
                self.compute_root() == *root
            }
            },
            // If the key maps to a different leaf, the proof cannot verify membership of `value`
            None => false,
        }
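
        // Usage sketch (illustrative; the names `smt`, `key`, and `value` are assumed,
        // with the pair `(key, value)` already inserted into `smt`):
        //
        //     let proof = smt.open(&key);
        //     assert!(proof.verify_membership(&key, &value, &smt.root()));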

@@ -1,10 +1,11 @@
use alloc::vec::Vec;

use super::{Felt, LeafIndex, NodeIndex, Rpo256, RpoDigest, Smt, SmtLeaf, EMPTY_WORD, SMT_DEPTH};
use crate::{
    merkle::{EmptySubtreeRoots, MerkleStore},
    merkle::{smt::SparseMerkleTree, EmptySubtreeRoots, MerkleStore},
    utils::{Deserializable, Serializable},
    Word, ONE, WORD_SIZE,
};
use alloc::vec::Vec;

// SMT
// --------------------------------------------------------------------------------------------

@@ -257,6 +258,195 @@ fn test_smt_removal() {
    }
}
|
||||
|
||||
/// This tests that we can correctly calculate prospective leaves -- that is, we can construct
|
||||
/// correct [`SmtLeaf`] values for a theoretical insertion on a Merkle tree without mutating or
|
||||
/// cloning the tree.
|
||||
#[test]
|
||||
fn test_prospective_hash() {
|
||||
let mut smt = Smt::default();
|
||||
|
||||
let raw = 0b_01101001_01101100_00011111_11111111_10010110_10010011_11100000_00000000_u64;
|
||||
|
||||
let key_1: RpoDigest = RpoDigest::from([ONE, ONE, ONE, Felt::new(raw)]);
|
||||
let key_2: RpoDigest =
|
||||
RpoDigest::from([2_u32.into(), 2_u32.into(), 2_u32.into(), Felt::new(raw)]);
|
||||
// Sort key_3 before key_1, to test non-append insertion.
|
||||
let key_3: RpoDigest =
|
||||
RpoDigest::from([0_u32.into(), 0_u32.into(), 0_u32.into(), Felt::new(raw)]);
|
||||
|
||||
let value_1 = [ONE; WORD_SIZE];
|
||||
let value_2 = [2_u32.into(); WORD_SIZE];
|
||||
let value_3: [Felt; 4] = [3_u32.into(); WORD_SIZE];
|
||||
|
||||
// insert key-value 1
|
||||
{
|
||||
let prospective =
|
||||
smt.construct_prospective_leaf(smt.get_leaf(&key_1), &key_1, &value_1).hash();
|
||||
smt.insert(key_1, value_1);
|
||||
|
||||
let leaf = smt.get_leaf(&key_1);
|
||||
assert_eq!(
|
||||
prospective,
|
||||
leaf.hash(),
|
||||
"prospective hash for leaf {leaf:?} did not match actual hash",
|
||||
);
|
||||
}
|
||||
|
||||
// insert key-value 2
|
||||
{
|
||||
let prospective =
|
||||
smt.construct_prospective_leaf(smt.get_leaf(&key_2), &key_2, &value_2).hash();
|
||||
smt.insert(key_2, value_2);
|
||||
|
||||
let leaf = smt.get_leaf(&key_2);
|
||||
assert_eq!(
|
||||
prospective,
|
||||
leaf.hash(),
|
||||
"prospective hash for leaf {leaf:?} did not match actual hash",
|
||||
);
|
||||
}
|
||||
|
||||
// insert key-value 3
|
||||
{
|
||||
let prospective =
|
||||
smt.construct_prospective_leaf(smt.get_leaf(&key_3), &key_3, &value_3).hash();
|
||||
smt.insert(key_3, value_3);
|
||||
|
||||
let leaf = smt.get_leaf(&key_3);
|
||||
assert_eq!(
|
||||
prospective,
|
||||
leaf.hash(),
|
||||
"prospective hash for leaf {leaf:?} did not match actual hash",
|
||||
);
|
||||
}
|
||||
|
||||
// remove key 3
|
||||
{
|
||||
let old_leaf = smt.get_leaf(&key_3);
|
||||
let old_value_3 = smt.insert(key_3, EMPTY_WORD);
|
||||
assert_eq!(old_value_3, value_3);
|
||||
let prospective_leaf =
|
||||
smt.construct_prospective_leaf(smt.get_leaf(&key_3), &key_3, &old_value_3);
|
||||
|
||||
assert_eq!(
|
||||
old_leaf.hash(),
|
||||
prospective_leaf.hash(),
|
||||
"removing and prospectively re-adding a leaf didn't yield the original leaf:\
|
||||
\n original leaf: {old_leaf:?}\
|
||||
\n prospective leaf: {prospective_leaf:?}",
|
||||
);
|
||||
}
|
||||
|
||||
// remove key 2
|
||||
{
|
||||
let old_leaf = smt.get_leaf(&key_2);
|
||||
let old_value_2 = smt.insert(key_2, EMPTY_WORD);
|
||||
assert_eq!(old_value_2, value_2);
|
||||
let prospective_leaf =
|
||||
smt.construct_prospective_leaf(smt.get_leaf(&key_2), &key_2, &old_value_2);
|
||||
|
||||
assert_eq!(
|
||||
old_leaf.hash(),
|
||||
prospective_leaf.hash(),
|
||||
"removing and prospectively re-adding a leaf didn't yield the original leaf:\
|
||||
\n original leaf: {old_leaf:?}\
|
||||
\n prospective leaf: {prospective_leaf:?}",
|
||||
);
|
||||
}
|
||||
|
||||
// remove key 1
|
||||
{
|
||||
let old_leaf = smt.get_leaf(&key_1);
|
||||
let old_value_1 = smt.insert(key_1, EMPTY_WORD);
|
||||
assert_eq!(old_value_1, value_1);
|
||||
let prospective_leaf =
|
||||
smt.construct_prospective_leaf(smt.get_leaf(&key_1), &key_1, &old_value_1);
|
||||
assert_eq!(
|
||||
old_leaf.hash(),
|
||||
prospective_leaf.hash(),
|
||||
"removing and prospectively re-adding a leaf didn't yield the original leaf:\
|
||||
\n original leaf: {old_leaf:?}\
|
||||
\n prospective leaf: {prospective_leaf:?}",
|
||||
);
|
||||
}
|
||||
}

/// This tests that we can perform prospective changes correctly.
#[test]
fn test_prospective_insertion() {
    let mut smt = Smt::default();

    let raw = 0b_01101001_01101100_00011111_11111111_10010110_10010011_11100000_00000000_u64;

    let key_1: RpoDigest = RpoDigest::from([ONE, ONE, ONE, Felt::new(raw)]);
    let key_2: RpoDigest =
        RpoDigest::from([2_u32.into(), 2_u32.into(), 2_u32.into(), Felt::new(raw)]);
    // Sort key_3 before key_1, to test non-append insertion.
    let key_3: RpoDigest =
        RpoDigest::from([0_u32.into(), 0_u32.into(), 0_u32.into(), Felt::new(raw)]);

    let value_1 = [ONE; WORD_SIZE];
    let value_2 = [2_u32.into(); WORD_SIZE];
    let value_3: [Felt; 4] = [3_u32.into(); WORD_SIZE];

    let root_empty = smt.root();

    let root_1 = {
        smt.insert(key_1, value_1);
        smt.root()
    };

    let root_2 = {
        smt.insert(key_2, value_2);
        smt.root()
    };

    let root_3 = {
        smt.insert(key_3, value_3);
        smt.root()
    };

    // Test incremental updates.

    let mut smt = Smt::default();

    let mutations = smt.compute_mutations(vec![(key_1, value_1)]);
    assert_eq!(mutations.root(), root_1, "prospective root 1 did not match actual root 1");
    smt.apply_mutations(mutations).unwrap();
    assert_eq!(smt.root(), root_1, "root after apply did not match prospective root 1");

    let mutations = smt.compute_mutations(vec![(key_2, value_2)]);
    assert_eq!(mutations.root(), root_2, "prospective root 2 did not match actual root 2");
    let mutations =
        smt.compute_mutations(vec![(key_3, EMPTY_WORD), (key_2, value_2), (key_3, value_3)]);
    assert_eq!(mutations.root(), root_3, "prospective root 3 did not match actual root 3");
    smt.apply_mutations(mutations).unwrap();

    // Edge case: multiple values at the same key, where a later pair restores the original value.
    let mutations = smt.compute_mutations(vec![(key_3, EMPTY_WORD), (key_3, value_3)]);
    assert_eq!(mutations.root(), root_3);
    smt.apply_mutations(mutations).unwrap();
    assert_eq!(smt.root(), root_3);

    // Test batch updates, and that the order doesn't matter.
    let pairs =
        vec![(key_3, value_2), (key_2, EMPTY_WORD), (key_1, EMPTY_WORD), (key_3, EMPTY_WORD)];
    let mutations = smt.compute_mutations(pairs);
    assert_eq!(
        mutations.root(),
        root_empty,
        "prospective root for batch removal did not match actual root",
    );
    smt.apply_mutations(mutations).unwrap();
    assert_eq!(smt.root(), root_empty, "root after batch removal did not match the empty root");

    let pairs = vec![(key_3, value_3), (key_1, value_1), (key_2, value_2)];
    let mutations = smt.compute_mutations(pairs);
    assert_eq!(mutations.root(), root_3);
    smt.apply_mutations(mutations).unwrap();
    assert_eq!(smt.root(), root_3);
}

/// Tests that 2 key-value pairs stored in the same leaf have the same path
#[test]
fn test_smt_path_to_keys_in_same_leaf_are_equal() {
@@ -326,6 +516,16 @@ fn test_smt_entries() {
    assert!(entries.next().is_none());
}

/// Tests that the `EMPTY_ROOT` constant generated in the `Smt` equals the root of the empty tree
/// of depth 64
#[test]
fn test_smt_check_empty_root_constant() {
    // get the root of the empty tree of depth 64
    let empty_root_64_depth = EmptySubtreeRoots::empty_hashes(64)[0];

    assert_eq!(empty_root_64_depth, Smt::EMPTY_ROOT);
}

// SMT LEAF
// --------------------------------------------------------------------------------------------
@@ -1,9 +1,10 @@
use alloc::{collections::BTreeMap, vec::Vec};

use super::{EmptySubtreeRoots, InnerNodeInfo, MerkleError, MerklePath, NodeIndex};
use crate::{
    hash::rpo::{Rpo256, RpoDigest},
    Felt, Word, EMPTY_WORD,
};
use alloc::vec::Vec;

mod full;
pub use full::{Smt, SmtLeaf, SmtLeafError, SmtProof, SmtProofError, SMT_DEPTH};
@@ -44,17 +45,20 @@ pub const SMT_MAX_DEPTH: u8 = 64;
/// [SparseMerkleTree] currently doesn't support optimizations that compress Merkle proofs.
pub(crate) trait SparseMerkleTree<const DEPTH: u8> {
    /// The type for a key
    type Key: Clone;
    type Key: Clone + Ord;
    /// The type for a value
    type Value: Clone + PartialEq;
    /// The type for a leaf
    type Leaf;
    type Leaf: Clone;
    /// The type for an opening (i.e. a "proof") of a leaf
    type Opening;

    /// The default value used to compute the hash of empty leaves
    const EMPTY_VALUE: Self::Value;

    /// The root of the empty tree with provided DEPTH
    const EMPTY_ROOT: RpoDigest;

    // PROVIDED METHODS
    // ---------------------------------------------------------------------------------------------

@@ -139,6 +143,149 @@ pub(crate) trait SparseMerkleTree<const DEPTH: u8> {
        self.set_root(node_hash);
    }

    /// Computes what changes are necessary to insert the specified key-value pairs into this Merkle
    /// tree, allowing for validation before applying those changes.
    ///
    /// This method returns a [`MutationSet`], which contains all the information for inserting
    /// `kv_pairs` into this Merkle tree already calculated, including the new root hash, which can
    /// be queried with [`MutationSet::root()`]. Once a mutation set is returned,
    /// [`SparseMerkleTree::apply_mutations()`] can be called in order to commit these changes to
    /// the Merkle tree, or [`drop()`] to discard them.
    fn compute_mutations(
        &self,
        kv_pairs: impl IntoIterator<Item = (Self::Key, Self::Value)>,
    ) -> MutationSet<DEPTH, Self::Key, Self::Value> {
        use NodeMutation::*;

        let mut new_root = self.root();
        let mut new_pairs: BTreeMap<Self::Key, Self::Value> = Default::default();
        let mut node_mutations: BTreeMap<NodeIndex, NodeMutation> = Default::default();

        for (key, value) in kv_pairs {
            // If the old value and the new value are the same, there is nothing to update.
            // For the unusual case that kv_pairs has multiple values at the same key, we'll have
            // to check the key-value pairs we've already seen to get the "effective" old value.
            let old_value = new_pairs.get(&key).cloned().unwrap_or_else(|| self.get_value(&key));
            if value == old_value {
                continue;
            }

            let leaf_index = Self::key_to_leaf_index(&key);
            let mut node_index = NodeIndex::from(leaf_index);

            // We need the current leaf's hash to calculate the new leaf, but in the rare case that
            // `kv_pairs` has multiple pairs that go into the same leaf, then those pairs are also
            // part of the "current leaf".
            let old_leaf = {
                let pairs_at_index = new_pairs
                    .iter()
                    .filter(|&(new_key, _)| Self::key_to_leaf_index(new_key) == leaf_index);

                pairs_at_index.fold(self.get_leaf(&key), |acc, (k, v)| {
                    // Most of the time `pairs_at_index` should only contain a single entry (or
                    // none at all), as leaves holding multiple keys should be rare.
                    let existing_leaf = acc.clone();
                    self.construct_prospective_leaf(existing_leaf, k, v)
                })
            };

            let new_leaf = self.construct_prospective_leaf(old_leaf, &key, &value);

            let mut new_child_hash = Self::hash_leaf(&new_leaf);

            for node_depth in (0..node_index.depth()).rev() {
                // Whether the node we're replacing is the right child or the left child.
                let is_right = node_index.is_value_odd();
                node_index.move_up();

                let old_node = node_mutations
                    .get(&node_index)
                    .map(|mutation| match mutation {
                        Addition(node) => node.clone(),
                        Removal => EmptySubtreeRoots::get_inner_node(DEPTH, node_depth),
                    })
                    .unwrap_or_else(|| self.get_inner_node(node_index));

                let new_node = if is_right {
                    InnerNode {
                        left: old_node.left,
                        right: new_child_hash,
                    }
                } else {
                    InnerNode {
                        left: new_child_hash,
                        right: old_node.right,
                    }
                };

                // The next iteration will operate on this new node's hash.
                new_child_hash = new_node.hash();

                let &equivalent_empty_hash = EmptySubtreeRoots::entry(DEPTH, node_depth);
                let is_removal = new_child_hash == equivalent_empty_hash;
                let new_entry = if is_removal { Removal } else { Addition(new_node) };
                node_mutations.insert(node_index, new_entry);
            }

            // Once we're at depth 0, the last node we made is the new root.
            new_root = new_child_hash;
            // And then we're done with this pair; on to the next one.
            new_pairs.insert(key, value);
        }

        MutationSet {
            old_root: self.root(),
            new_root,
            node_mutations,
            new_pairs,
        }
    }
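
    // Worked example of the loop above (illustrative): in a depth-2 tree, updating the
    // leaf at index 0b10 first rehashes that leaf, then rebuilds the depth-1 inner node
    // with the new hash as its left child (0b10 is even), then rebuilds the root from
    // that node and its untouched sibling. Hashes equal to the empty-subtree hash become
    // `Removal` entries, so the tree never stores subtrees consisting only of empty nodes.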

    /// Applies the prospective mutations computed with [`SparseMerkleTree::compute_mutations()`]
    /// to this tree.
    ///
    /// # Errors
    /// If `mutations` was computed on a tree with a different root than this one, returns
    /// [`MerkleError::ConflictingRoots`] with a two-item [`Vec`]. The first item is the root hash
    /// the `mutations` were computed against, and the second item is the actual current root of
    /// this tree.
    fn apply_mutations(
        &mut self,
        mutations: MutationSet<DEPTH, Self::Key, Self::Value>,
    ) -> Result<(), MerkleError>
    where
        Self: Sized,
    {
        use NodeMutation::*;
        let MutationSet {
            old_root,
            node_mutations,
            new_pairs,
            new_root,
        } = mutations;

        // Guard against accidentally trying to apply mutations that were computed against a
        // different tree, including a stale version of this tree.
        if old_root != self.root() {
            return Err(MerkleError::ConflictingRoots(vec![old_root, self.root()]));
        }

        for (index, mutation) in node_mutations {
            match mutation {
                Removal => self.remove_inner_node(index),
                Addition(node) => self.insert_inner_node(index, node),
            }
        }

        for (key, value) in new_pairs {
            self.insert_value(key, value);
        }

        self.set_root(new_root);

        Ok(())
    }

    // REQUIRED METHODS
    // ---------------------------------------------------------------------------------------------

@@ -160,12 +307,34 @@ pub(crate) trait SparseMerkleTree<const DEPTH: u8> {
    /// Inserts a leaf node, and returns the previous value at the key, if any
    fn insert_value(&mut self, key: Self::Key, value: Self::Value) -> Option<Self::Value>;

    /// Returns the value at the specified key. Recall that by definition, any key that hasn't been
    /// updated is associated with [`Self::EMPTY_VALUE`].
    fn get_value(&self, key: &Self::Key) -> Self::Value;

    /// Returns the leaf associated with the specified key.
    fn get_leaf(&self, key: &Self::Key) -> Self::Leaf;

    /// Returns the hash of a leaf
    fn hash_leaf(leaf: &Self::Leaf) -> RpoDigest;

    /// Returns what a leaf would look like if a key-value pair were inserted into the tree, without
    /// mutating the tree itself. The existing leaf can be empty.
    ///
    /// To get a prospective leaf based on the current state of the tree, use `self.get_leaf(key)`
    /// as the argument for `existing_leaf`. The return value from this function can be chained back
    /// into this function as the first argument to continue making prospective changes.
    ///
    /// # Invariants
    /// Because this method is for a prospective key-value insertion into a specific leaf,
    /// `existing_leaf` must have the same leaf index as `key` (as determined by
    /// [`SparseMerkleTree::key_to_leaf_index()`]), or the result will be meaningless.
    fn construct_prospective_leaf(
        &self,
        existing_leaf: Self::Leaf,
        key: &Self::Key,
        value: &Self::Value,
    ) -> Self::Leaf;
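
    // Chaining sketch (illustrative; `key_a`/`key_b` are assumed keys that share a leaf
    // index): prospective updates targeting the same leaf fold into one another, so two
    // pending pairs can be combined before hashing:
    //
    //     let leaf = self.get_leaf(&key_a);
    //     let leaf = self.construct_prospective_leaf(leaf, &key_a, &value_a);
    //     let leaf = self.construct_prospective_leaf(leaf, &key_b, &value_b);
    //     let prospective_hash = Self::hash_leaf(&leaf);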

    /// Maps a key to a leaf index
    fn key_to_leaf_index(key: &Self::Key) -> LeafIndex<DEPTH>;
@@ -243,3 +412,50 @@ impl<const DEPTH: u8> TryFrom<NodeIndex> for LeafIndex<DEPTH> {
        Self::new(node_index.value())
    }
}

// MUTATIONS
// ================================================================================================

/// A change to an inner node of a [`SparseMerkleTree`] that hasn't yet been applied.
/// [`MutationSet`] stores this type in relation to a [`NodeIndex`] to keep track of what changes
/// need to occur at which node indices.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum NodeMutation {
    /// Corresponds to [`SparseMerkleTree::remove_inner_node()`].
    Removal,
    /// Corresponds to [`SparseMerkleTree::insert_inner_node()`].
    Addition(InnerNode),
}

/// Represents a group of prospective mutations to a `SparseMerkleTree`, created by
/// `SparseMerkleTree::compute_mutations()`, and that can be applied with
/// `SparseMerkleTree::apply_mutations()`.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct MutationSet<const DEPTH: u8, K, V> {
    /// The root of the Merkle tree this MutationSet is for, recorded at the time
    /// [`SparseMerkleTree::compute_mutations()`] was called. Exists to guard against applying
    /// mutations to the wrong tree or applying stale mutations to a tree that has since changed.
    old_root: RpoDigest,
    /// The set of nodes that need to be removed or added. The "effective" node at an index is the
    /// Merkle tree's existing node at that index, with the [`NodeMutation`] in this map at that
    /// index overlaid, if any. Each [`NodeMutation::Addition`] corresponds to a
    /// [`SparseMerkleTree::insert_inner_node()`] call, and each [`NodeMutation::Removal`]
    /// corresponds to a [`SparseMerkleTree::remove_inner_node()`] call.
    node_mutations: BTreeMap<NodeIndex, NodeMutation>,
    /// The set of top-level key-value pairs we're prospectively adding to the tree, including
    /// adding empty values. The "effective" value for a key is the value in this BTreeMap, falling
    /// back to the existing value in the Merkle tree. Each entry corresponds to a
    /// [`SparseMerkleTree::insert_value()`] call.
    new_pairs: BTreeMap<K, V>,
    /// The calculated root for the Merkle tree, given these mutations. Publicly retrievable with
    /// [`MutationSet::root()`]. Corresponds to a [`SparseMerkleTree::set_root()`] call.
    new_root: RpoDigest,
}

impl<const DEPTH: u8, K, V> MutationSet<DEPTH, K, V> {
    /// Queries the root that was calculated during `SparseMerkleTree::compute_mutations()`. See
    /// that method for more information.
    pub fn root(&self) -> RpoDigest {
        self.new_root
    }
}
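
// Illustrative sketch of the `old_root` guard: mutations computed against a tree that
// has since changed are rejected. Assumes the two keys are distinct and that inserting
// `value` under `key_b` actually changes the root.
#[cfg(test)]
fn _stale_mutations_are_rejected(mut smt: Smt, key_a: RpoDigest, key_b: RpoDigest, value: Word) {
    let mutations = smt.compute_mutations([(key_a, value)]);
    // The tree changes underneath the mutation set...
    smt.insert(key_b, value);
    // ...so applying now fails rather than silently corrupting the tree.
    assert!(matches!(smt.apply_mutations(mutations), Err(MerkleError::ConflictingRoots(_))));
}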

@@ -1,9 +1,10 @@
use alloc::collections::{BTreeMap, BTreeSet};

use super::{
    super::ValuePath, EmptySubtreeRoots, InnerNode, InnerNodeInfo, LeafIndex, MerkleError,
    MerklePath, NodeIndex, RpoDigest, SparseMerkleTree, Word, EMPTY_WORD, SMT_MAX_DEPTH,
    SMT_MIN_DEPTH,
    MerklePath, MutationSet, NodeIndex, RpoDigest, SparseMerkleTree, Word, EMPTY_WORD,
    SMT_MAX_DEPTH, SMT_MIN_DEPTH,
};
use alloc::collections::{BTreeMap, BTreeSet};

#[cfg(test)]
mod tests;
@@ -157,6 +158,12 @@ impl<const DEPTH: u8> SimpleSmt<DEPTH> {
        <Self as SparseMerkleTree<DEPTH>>::open(self, key)
    }

    /// Returns a boolean value indicating whether the SMT is empty.
    pub fn is_empty(&self) -> bool {
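        // Invariant (checked in debug builds only): the leaf map is empty exactly when
        // the root equals the canonical empty-tree root.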
        debug_assert_eq!(self.leaves.is_empty(), self.root == Self::EMPTY_ROOT);
        self.root == Self::EMPTY_ROOT
    }

    // ITERATORS
    // --------------------------------------------------------------------------------------------

@@ -187,6 +194,48 @@ impl<const DEPTH: u8> SimpleSmt<DEPTH> {
        <Self as SparseMerkleTree<DEPTH>>::insert(self, key, value)
    }

    /// Computes what changes are necessary to insert the specified key-value pairs into this
    /// Merkle tree, allowing for validation before applying those changes.
    ///
    /// This method returns a [`MutationSet`], which contains all the information for inserting
    /// `kv_pairs` into this Merkle tree already calculated, including the new root hash, which can
    /// be queried with [`MutationSet::root()`]. Once a mutation set is returned,
    /// [`SimpleSmt::apply_mutations()`] can be called in order to commit these changes to the
    /// Merkle tree, or [`drop()`] to discard them.
    ///
    /// # Example
    /// ```
    /// # use miden_crypto::{hash::rpo::RpoDigest, Felt, Word};
    /// # use miden_crypto::merkle::{LeafIndex, SimpleSmt, EmptySubtreeRoots, SMT_DEPTH};
    /// let mut smt: SimpleSmt<3> = SimpleSmt::new().unwrap();
    /// let pair = (LeafIndex::default(), Word::default());
    /// let mutations = smt.compute_mutations(vec![pair]);
    /// assert_eq!(mutations.root(), *EmptySubtreeRoots::entry(3, 0));
    /// smt.apply_mutations(mutations).unwrap();
    /// assert_eq!(smt.root(), *EmptySubtreeRoots::entry(3, 0));
    /// ```
    pub fn compute_mutations(
        &self,
        kv_pairs: impl IntoIterator<Item = (LeafIndex<DEPTH>, Word)>,
    ) -> MutationSet<DEPTH, LeafIndex<DEPTH>, Word> {
        <Self as SparseMerkleTree<DEPTH>>::compute_mutations(self, kv_pairs)
    }

    /// Applies the prospective mutations computed with [`SimpleSmt::compute_mutations()`] to this
    /// tree.
    ///
    /// # Errors
    /// If `mutations` was computed on a tree with a different root than this one, returns
    /// [`MerkleError::ConflictingRoots`] with a two-item [`alloc::vec::Vec`]. The first item is the
    /// root hash the `mutations` were computed against, and the second item is the actual
    /// current root of this tree.
    pub fn apply_mutations(
        &mut self,
        mutations: MutationSet<DEPTH, LeafIndex<DEPTH>, Word>,
    ) -> Result<(), MerkleError> {
        <Self as SparseMerkleTree<DEPTH>>::apply_mutations(self, mutations)
    }

    /// Inserts a subtree at the specified index. The depth at which the subtree is inserted is
    /// computed as `DEPTH - SUBTREE_DEPTH`.
    ///
@@ -255,6 +304,7 @@ impl<const DEPTH: u8> SparseMerkleTree<DEPTH> for SimpleSmt<DEPTH> {
    type Opening = ValuePath;

    const EMPTY_VALUE: Self::Value = EMPTY_WORD;
    const EMPTY_ROOT: RpoDigest = *EmptySubtreeRoots::entry(DEPTH, 0);

    fn root(&self) -> RpoDigest {
        self.root
@@ -265,11 +315,10 @@ impl<const DEPTH: u8> SparseMerkleTree<DEPTH> for SimpleSmt<DEPTH> {
    }

    fn get_inner_node(&self, index: NodeIndex) -> InnerNode {
        self.inner_nodes.get(&index).cloned().unwrap_or_else(|| {
            let node = EmptySubtreeRoots::entry(DEPTH, index.depth() + 1);

            InnerNode { left: *node, right: *node }
        })
        self.inner_nodes
            .get(&index)
            .cloned()
            .unwrap_or_else(|| EmptySubtreeRoots::get_inner_node(DEPTH, index.depth()))
    }

    fn insert_inner_node(&mut self, index: NodeIndex, inner_node: InnerNode) {
@@ -288,6 +337,10 @@ impl<const DEPTH: u8> SparseMerkleTree<DEPTH> for SimpleSmt<DEPTH> {
        }
    }

    fn get_value(&self, key: &LeafIndex<DEPTH>) -> Word {
        self.get_leaf(key)
    }

    fn get_leaf(&self, key: &LeafIndex<DEPTH>) -> Word {
        let leaf_pos = key.value();
        match self.leaves.get(&leaf_pos) {
@@ -301,6 +354,15 @@ impl<const DEPTH: u8> SparseMerkleTree<DEPTH> for SimpleSmt<DEPTH> {
        leaf.into()
    }
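
    // A `SimpleSmt` leaf holds exactly one `Word`, so a prospective insertion simply
    // replaces the leaf wholesale; the existing leaf and key are not consulted.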
    fn construct_prospective_leaf(
        &self,
        _existing_leaf: Word,
        _key: &LeafIndex<DEPTH>,
        value: &Word,
    ) -> Word {
        *value
    }

    fn key_to_leaf_index(key: &LeafIndex<DEPTH>) -> LeafIndex<DEPTH> {
        *key
    }

@@ -1,3 +1,5 @@
use alloc::vec::Vec;

use super::{
    super::{MerkleError, RpoDigest, SimpleSmt},
    NodeIndex,
@@ -10,7 +12,6 @@ use crate::{
    },
    Word, EMPTY_WORD,
};
use alloc::vec::Vec;

// TEST DATA
// ================================================================================================

@@ -443,6 +444,23 @@ fn test_simplesmt_set_subtree_entire_tree() {
    assert_eq!(tree.root(), *EmptySubtreeRoots::entry(DEPTH, 0));
}

/// Tests that the `EMPTY_ROOT` constant generated in the `SimpleSmt` equals the root of the empty
/// tree of the matching depth
#[test]
fn test_simplesmt_check_empty_root_constant() {
    // get the root of the empty tree of depth 64
    let empty_root_64_depth = EmptySubtreeRoots::empty_hashes(64)[0];
    assert_eq!(empty_root_64_depth, SimpleSmt::<64>::EMPTY_ROOT);

    // get the root of the empty tree of depth 32
    let empty_root_32_depth = EmptySubtreeRoots::empty_hashes(32)[0];
    assert_eq!(empty_root_32_depth, SimpleSmt::<32>::EMPTY_ROOT);

    // get the root of the empty tree of depth 1
    let empty_root_1_depth = EmptySubtreeRoots::empty_hashes(1)[0];
    assert_eq!(empty_root_1_depth, SimpleSmt::<1>::EMPTY_ROOT);
}

// HELPER FUNCTIONS
// --------------------------------------------------------------------------------------------

@@ -127,8 +127,8 @@ impl<T: KvMap<RpoDigest, StoreNode>> MerkleStore<T> {
    /// # Errors
    /// This method can return the following errors:
    /// - `RootNotInStore` if the `root` is not present in the store.
    /// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in
    ///   the store.
    /// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in the
    ///   store.
    pub fn get_node(&self, root: RpoDigest, index: NodeIndex) -> Result<RpoDigest, MerkleError> {
        let mut hash = root;

@@ -152,8 +152,8 @@ impl<T: KvMap<RpoDigest, StoreNode>> MerkleStore<T> {
    /// # Errors
    /// This method can return the following errors:
    /// - `RootNotInStore` if the `root` is not present in the store.
    /// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in
    ///   the store.
    /// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in the
    ///   store.
    pub fn get_path(&self, root: RpoDigest, index: NodeIndex) -> Result<ValuePath, MerkleError> {
        let mut hash = root;
        let mut path = Vec::with_capacity(index.depth().into());

@@ -421,8 +421,8 @@ impl<T: KvMap<RpoDigest, StoreNode>> MerkleStore<T> {
    /// # Errors
    /// This method can return the following errors:
    /// - `RootNotInStore` if the `root` is not present in the store.
    /// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in
    ///   the store.
    /// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in the
    ///   store.
    pub fn set_node(
        &mut self,
        mut root: RpoDigest,

@@ -1,4 +1,10 @@
use seq_macro::seq;
#[cfg(feature = "std")]
use {
    super::{Deserializable, Serializable},
    alloc::boxed::Box,
    std::error::Error,
};

use super::{
    DefaultMerkleStore as MerkleStore, EmptySubtreeRoots, MerkleError, MerklePath, NodeIndex,
@@ -11,13 +17,6 @@ use crate::{
    Felt, Word, ONE, WORD_SIZE, ZERO,
};

#[cfg(feature = "std")]
use {
    super::{Deserializable, Serializable},
    alloc::boxed::Box,
    std::error::Error,
};

// TEST DATA
// ================================================================================================

@@ -614,7 +613,7 @@ fn node_path_should_be_truncated_by_midtier_insert() {
    let path = store.get_path(root, index).unwrap().path;
    assert_eq!(node, result);
    assert_eq!(path.depth(), depth);
    assert!(path.verify(index.value(), result, &root));
    assert!(path.verify(index.value(), result, &root).is_ok());

    // flip the first bit of the key and insert the second node on a different depth
    let key = key ^ (1 << 63);
@@ -627,7 +626,7 @@ fn node_path_should_be_truncated_by_midtier_insert() {
    let path = store.get_path(root, index).unwrap().path;
    assert_eq!(node, result);
    assert_eq!(path.depth(), depth);
    assert!(path.verify(index.value(), result, &root));
    assert!(path.verify(index.value(), result, &root).is_ok());

    // attempt to fetch a path of the second node to depth 64
    // should fail because the previously inserted node will remove its sub-tree from the set

@@ -1,10 +1,12 @@
use alloc::{string::ToString, vec::Vec};

use rand_core::impls;

use super::{Felt, FeltRng, FieldElement, RandomCoin, RandomCoinError, RngCore, Word, ZERO};
use crate::{
    hash::rpo::{Rpo256, RpoDigest},
    utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
};
use alloc::{string::ToString, vec::Vec};
use rand_core::impls;

// CONSTANTS
// ================================================================================================

@@ -20,8 +22,8 @@ const HALF_RATE_WIDTH: usize = (Rpo256::RATE_RANGE.end - Rpo256::RATE_RANGE.start) / 2;
/// described in <https://eprint.iacr.org/2011/499.pdf>.
///
/// The simplification is related to the following facts:
/// 1. A call to the reseed method implies one and only one call to the permutation function.
///    This is possible because in our case we never reseed with more than 4 field elements.
/// 1. A call to the reseed method implies one and only one call to the permutation function. This
///    is possible because in our case we never reseed with more than 4 field elements.
/// 2. As a result of the previous point, we don't make use of an input buffer to accumulate seed
///    material.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]

@@ -1,10 +1,12 @@
use alloc::{string::ToString, vec::Vec};

use rand_core::impls;

use super::{Felt, FeltRng, FieldElement, RandomCoin, RandomCoinError, RngCore, Word, ZERO};
use crate::{
    hash::rpx::{Rpx256, RpxDigest},
    utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
};
use alloc::{string::ToString, vec::Vec};
use rand_core::impls;

// CONSTANTS
// ================================================================================================

@@ -20,8 +22,8 @@ const HALF_RATE_WIDTH: usize = (Rpx256::RATE_RANGE.end - Rpx256::RATE_RANGE.start) / 2;
/// described in <https://eprint.iacr.org/2011/499.pdf>.
///
/// The simplification is related to the following facts:
/// 1. A call to the reseed method implies one and only one call to the permutation function.
///    This is possible because in our case we never reseed with more than 4 field elements.
/// 1. A call to the reseed method implies one and only one call to the permutation function. This
///    is possible because in our case we never reseed with more than 4 field elements.
/// 2. As a result of the previous point, we don't make use of an input buffer to accumulate seed
///    material.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]

@@ -62,8 +62,8 @@ impl<K: Ord + Clone, V: Clone> KvMap<K, V> for BTreeMap<K, V> {
/// The [RecordingMap] is composed of three parts:
/// - `data`: which contains the current set of key-value pairs in the map.
/// - `updates`: which tracks keys for which values have been changed since the map was
///    instantiated. Updates include insertions, removals, and updates of values under existing
///    keys.
///   instantiated. Updates include insertions, removals, and updates of values under existing
///   keys.
/// - `trace`: which contains the key-value pairs from the original data which have been accessed
///   since the map was instantiated.
#[derive(Debug, Default, Clone, Eq, PartialEq)]

@@ -325,7 +325,8 @@ mod tests {
        let mut map = RecordingMap::new(ITEMS.to_vec());
        assert!(map.iter().all(|(x, y)| ITEMS.contains(&(*x, *y))));

        // when inserting an entry whose key already exists, the iterator should return the new value
        // when inserting an entry whose key already exists, the iterator should return the new
        // value
        let new_value = 5;
        map.insert(4, new_value);
        assert_eq!(map.iter().count(), ITEMS.len());
@@ -59,16 +59,16 @@ impl Display for HexParseError {
        match self {
            HexParseError::InvalidLength { expected, actual } => {
                write!(f, "Expected hex data to have length {expected}, including the 0x prefix. Got {actual}")
            }
            },
            HexParseError::MissingPrefix => {
                write!(f, "Hex encoded data must start with 0x prefix")
            }
            },
            HexParseError::InvalidChar => {
                write!(f, "Hex encoded data must contain characters [a-fA-F0-9]")
            }
            },
            HexParseError::OutOfRange => {
                write!(f, "Hex encoded values of an RpoDigest must be inside the field modulus")
            }
            },
        }
    }
}