diff --git a/.cargo/config b/.cargo/config index 39e142321a..8f61ca69cf 100644 --- a/.cargo/config +++ b/.cargo/config @@ -1,7 +1,7 @@ [cargo-new] name = "Nervos Core Dev" email = "dev@nervos.org" -edition = "2018" +edition = "2021" [target.aarch64-unknown-linux-gnu] linker = "aarch64-linux-gnu-gcc" diff --git a/Cargo.toml b/Cargo.toml index e45fded827..eb7af64f8c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,12 +3,12 @@ name = "ckb" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" build = "build.rs" description = "CKB is the layer 1 of Nervos Network, a public/permissionless blockchain." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" -resolver = "2" +rust-version = "1.56.1" [build-dependencies] ckb-build-info = { path = "util/build-info", version = "= 0.102.0-pre" } diff --git a/README.md b/README.md index 054ae4de21..569f4e38ca 100644 --- a/README.md +++ b/README.md @@ -52,11 +52,6 @@ The `master` branch is regularly built and tested. It is considered already prod The contribution workflow is described in [CONTRIBUTING.md](CONTRIBUTING.md), and security policy is described in [SECURITY.md](SECURITY.md). To propose new protocol or standard for Nervos, see [Nervos RFC](https://github.com/nervosnetwork/rfcs). - -## Minimum Supported Rust Version policy (MSRV) - -The crate `ckb`'s minimum supported rustc version is 1.51.0. - --- ## Documentations diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 5db0e613d8..d2a76e8d81 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-benches" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "CKB benchmarks." 
homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/benches/benches/benchmarks/always_success.rs b/benches/benches/benchmarks/always_success.rs index a21e17cd41..dc5f0f205a 100644 --- a/benches/benches/benchmarks/always_success.rs +++ b/benches/benches/benchmarks/always_success.rs @@ -75,7 +75,7 @@ fn bench(c: &mut Criterion) { .unwrap()]; let mut parent = blocks[0].clone(); (0..5).for_each(|i| { - let block = gen_always_success_block(&mut blocks, &parent, &shared2); + let block = gen_always_success_block(&mut blocks, &parent, shared2); chain2 .internal_process_block( Arc::new(block.clone()), @@ -94,7 +94,7 @@ fn bench(c: &mut Criterion) { }); let mut parent = blocks[2].clone(); (0..2).for_each(|_| { - let block = gen_always_success_block(&mut blocks, &parent, &shared3); + let block = gen_always_success_block(&mut blocks, &parent, shared3); chain3 .internal_process_block( Arc::new(block.clone()), @@ -149,7 +149,7 @@ fn bench(c: &mut Criterion) { .unwrap()]; let mut parent = blocks[0].clone(); (0..5).for_each(|i| { - let block = gen_always_success_block(&mut blocks, &parent, &shared2); + let block = gen_always_success_block(&mut blocks, &parent, shared2); let arc_block = Arc::new(block.clone()); chain2 .internal_process_block(Arc::clone(&arc_block), Switch::DISABLE_ALL) @@ -163,7 +163,7 @@ fn bench(c: &mut Criterion) { }); let mut parent = blocks[2].clone(); (0..4).for_each(|_| { - let block = gen_always_success_block(&mut blocks, &parent, &shared3); + let block = gen_always_success_block(&mut blocks, &parent, shared3); chain3 .internal_process_block( Arc::new(block.clone()), diff --git a/benches/benches/benchmarks/overall.rs b/benches/benches/benchmarks/overall.rs index acbdaae22e..32fba28126 100644 --- a/benches/benches/benchmarks/overall.rs +++ b/benches/benches/benchmarks/overall.rs @@ -24,7 +24,6 @@ use ckb_verification::HeaderVerifier; use ckb_verification_traits::Verifier; use 
criterion::{criterion_group, BatchSize, BenchmarkId, Criterion}; use rand::random; -use std::convert::TryFrom; use std::sync::Arc; #[cfg(not(feature = "ci"))] @@ -200,7 +199,7 @@ fn bench(c: &mut Criterion) { let block = raw_block.as_builder().header(header).build().into_view(); let header_verifier = - HeaderVerifier::new(snapshot.as_ref(), &shared.consensus()); + HeaderVerifier::new(snapshot.as_ref(), shared.consensus()); header_verifier .verify(&block.header()) .expect("header verified"); diff --git a/benches/benches/benchmarks/resolve.rs b/benches/benches/benchmarks/resolve.rs index 42c19c02f2..b936dd0203 100644 --- a/benches/benches/benchmarks/resolve.rs +++ b/benches/benches/benchmarks/resolve.rs @@ -20,7 +20,6 @@ use ckb_types::{ }; use criterion::{criterion_group, BatchSize, BenchmarkId, Criterion}; use std::collections::HashSet; -use std::convert::TryFrom; #[cfg(not(feature = "ci"))] const SIZE: usize = 500; @@ -127,7 +126,7 @@ fn bench(c: &mut Criterion) { |(shared, _)| { let mut i = 100; let snapshot: &Snapshot = &shared.snapshot(); - let txs = gen_txs_from_genesis(&shared.consensus().genesis_block()); + let txs = gen_txs_from_genesis(shared.consensus().genesis_block()); while i > 0 { let mut seen_inputs = HashSet::new(); @@ -153,7 +152,7 @@ fn bench(c: &mut Criterion) { |(shared, _)| { let mut i = 1; let snapshot: &Snapshot = &shared.snapshot(); - let txs = gen_txs_from_genesis(&shared.consensus().genesis_block()); + let txs = gen_txs_from_genesis(shared.consensus().genesis_block()); let mut seen_inputs = HashSet::new(); let rtxs: Vec<_> = txs diff --git a/benches/benches/benchmarks/secp_2in2out.rs b/benches/benches/benchmarks/secp_2in2out.rs index 7d1abd6883..8dd3eb3d2d 100644 --- a/benches/benches/benchmarks/secp_2in2out.rs +++ b/benches/benches/benchmarks/secp_2in2out.rs @@ -75,7 +75,7 @@ fn bench(c: &mut Criterion) { .unwrap()]; let mut parent = blocks[0].clone(); (0..5).for_each(|i| { - let block = gen_secp_block(&mut blocks, &parent, &shared2); + 
let block = gen_secp_block(&mut blocks, &parent, shared2); chain2 .internal_process_block( Arc::new(block.clone()), @@ -94,7 +94,7 @@ fn bench(c: &mut Criterion) { }); let mut parent = blocks[2].clone(); (0..2).for_each(|_| { - let block = gen_secp_block(&mut blocks, &parent, &shared3); + let block = gen_secp_block(&mut blocks, &parent, shared3); chain3 .internal_process_block( Arc::new(block.clone()), @@ -149,7 +149,7 @@ fn bench(c: &mut Criterion) { .unwrap()]; let mut parent = blocks[0].clone(); (0..5).for_each(|i| { - let block = gen_secp_block(&mut blocks, &parent, &shared2); + let block = gen_secp_block(&mut blocks, &parent, shared2); let arc_block = Arc::new(block.clone()); chain2 .internal_process_block(Arc::clone(&arc_block), Switch::DISABLE_ALL) @@ -163,7 +163,7 @@ fn bench(c: &mut Criterion) { }); let mut parent = blocks[2].clone(); (0..4).for_each(|_| { - let block = gen_secp_block(&mut blocks, &parent, &shared3); + let block = gen_secp_block(&mut blocks, &parent, shared3); chain3 .internal_process_block( Arc::new(block.clone()), diff --git a/benches/benches/benchmarks/util.rs b/benches/benches/benchmarks/util.rs index 0cc77f0eed..f9e8828b19 100644 --- a/benches/benches/benchmarks/util.rs +++ b/benches/benches/benchmarks/util.rs @@ -462,7 +462,7 @@ pub fn create_2out_transaction( for w in &non_sig_witnesses { let len: u64 = w.len() as u64; blake2b.update(&len.to_le_bytes()); - blake2b.update(&w); + blake2b.update(w); } blake2b.finalize(&mut message); let message = H256::from(message); @@ -501,7 +501,7 @@ pub fn dao_data(shared: &Shared, parent: &HeaderView, txs: &[TransactionView]) - let data_loader = snapshot.as_data_provider(); let calculator = DaoCalculator::new(snapshot.consensus(), &data_loader); calculator - .dao_field(&rtxs, &parent) + .dao_field(&rtxs, parent) .expect("calculator dao_field") } diff --git a/chain/Cargo.toml b/chain/Cargo.toml index 617b1c9ccc..af8f533df4 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -3,7 +3,7 @@ name = 
"ckb-chain" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "CKB Blockchain Service, Importing Blocks" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 79cad26f73..05707a3a43 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -295,7 +295,7 @@ impl ChainService { db_txn.insert_current_epoch_ext(&target_epoch_ext)?; for blk in fork.attached_blocks() { - db_txn.delete_block(&blk)?; + db_txn.delete_block(blk)?; } db_txn.commit()?; @@ -340,13 +340,13 @@ impl ChainService { fn non_contextual_verify(&self, block: &BlockView) -> Result<(), Error> { let consensus = self.shared.consensus(); - BlockVerifier::new(consensus).verify(&block).map_err(|e| { + BlockVerifier::new(consensus).verify(block).map_err(|e| { debug!("[process_block] BlockVerifier error {:?}", e); e })?; NonContextualBlockTxsVerifier::new(consensus) - .verify(&block) + .verify(block) .map_err(|e| { debug!( "[process_block] NonContextualBlockTxsVerifier error {:?}", @@ -535,7 +535,7 @@ impl ChainService { .insert(blk.header().number(), blk.union_proposal_ids()); } - self.reload_proposal_table(&fork); + self.reload_proposal_table(fork); } // if rollback happen, go back check whether need reload proposal_table from block @@ -771,7 +771,7 @@ impl ChainService { &resolved, b, Arc::clone(&txs_verify_cache), - &async_handle, + async_handle, switch, ) { Ok((cycles, cache_entries)) => { diff --git a/chain/src/tests/basic.rs b/chain/src/tests/basic.rs index d33e08cf70..852d164b33 100644 --- a/chain/src/tests/basic.rs +++ b/chain/src/tests/basic.rs @@ -130,7 +130,7 @@ fn test_transaction_spend_in_same_block() { let last_cellbase = &shared.consensus().genesis_block().transactions()[1]; let last_cellbase_hash = last_cellbase.hash(); - let tx1 = create_multi_outputs_transaction(&last_cellbase, vec![0], 2, vec![1]); + let tx1 = 
create_multi_outputs_transaction(last_cellbase, vec![0], 2, vec![1]); let tx1_hash = tx1.hash(); let tx2 = create_multi_outputs_transaction(&tx1, vec![0], 2, vec![2]); let tx2_hash = tx2.hash(); @@ -255,7 +255,7 @@ fn test_transaction_conflict_in_different_blocks() { chain.gen_empty_block(&mock_store); let last_cellbase = &shared.consensus().genesis_block().transactions()[1]; - let tx1 = create_multi_outputs_transaction(&last_cellbase, vec![0], 2, vec![1]); + let tx1 = create_multi_outputs_transaction(last_cellbase, vec![0], 2, vec![1]); let tx1_hash = tx1.hash(); let tx2 = create_multi_outputs_transaction(&tx1, vec![0], 2, vec![1]); let tx3 = create_multi_outputs_transaction(&tx1, vec![0], 2, vec![2]); diff --git a/chain/src/tests/block_assembler.rs b/chain/src/tests/block_assembler.rs index 111622cddd..2624fabd9c 100644 --- a/chain/src/tests/block_assembler.rs +++ b/chain/src/tests/block_assembler.rs @@ -84,7 +84,7 @@ fn test_get_block_template() { let header_verify_result = { let snapshot: &Snapshot = &shared.snapshot(); - let header_verifier = HeaderVerifier::new(snapshot, &shared.consensus()); + let header_verifier = HeaderVerifier::new(snapshot, shared.consensus()); header_verifier.verify(&header) }; assert!(header_verify_result.is_ok()); diff --git a/chain/src/tests/cell.rs b/chain/src/tests/cell.rs index 2147e8bf5e..99d5882bcd 100644 --- a/chain/src/tests/cell.rs +++ b/chain/src/tests/cell.rs @@ -58,7 +58,7 @@ pub(crate) fn gen_block( let epoch = shared .consensus() - .next_epoch_ext(&parent_header, &shared.store().as_data_provider()) + .next_epoch_ext(parent_header, &shared.store().as_data_provider()) .unwrap() .epoch(); diff --git a/chain/src/tests/delay_verify.rs b/chain/src/tests/delay_verify.rs index f4531a8ecf..bcd178fdab 100644 --- a/chain/src/tests/delay_verify.rs +++ b/chain/src/tests/delay_verify.rs @@ -87,7 +87,7 @@ fn test_dead_cell_in_different_block() { } let last_cellbase = &shared.consensus().genesis_block().transactions()[1]; - let tx1 = 
create_multi_outputs_transaction(&last_cellbase, vec![0], 2, vec![1]); + let tx1 = create_multi_outputs_transaction(last_cellbase, vec![0], 2, vec![1]); let tx1_hash = tx1.hash(); let tx2 = create_multi_outputs_transaction(&tx1, vec![0], 2, vec![2]); let tx3 = create_multi_outputs_transaction(&tx1, vec![0], 2, vec![3]); @@ -271,7 +271,7 @@ fn test_full_dead_transaction() { chain2.push(block.clone()); mock_store.insert_block(&block, shared.consensus().genesis_epoch_ext()); let root_tx = &shared.consensus().genesis_block().transactions()[1]; - let tx1 = create_multi_outputs_transaction(&root_tx, vec![0], 1, vec![1]); + let tx1 = create_multi_outputs_transaction(root_tx, vec![0], 1, vec![1]); parent = block.header(); for i in 2..switch_fork_number { diff --git a/chain/src/tests/non_contextual_block_txs_verify.rs b/chain/src/tests/non_contextual_block_txs_verify.rs index 1266f4c3f6..827370fcb1 100644 --- a/chain/src/tests/non_contextual_block_txs_verify.rs +++ b/chain/src/tests/non_contextual_block_txs_verify.rs @@ -65,7 +65,7 @@ pub(crate) fn gen_block( let epoch = shared .consensus() - .next_epoch_ext(&parent_header, &shared.store().as_data_provider()) + .next_epoch_ext(parent_header, &shared.store().as_data_provider()) .unwrap() .epoch(); diff --git a/chain/src/tests/reward.rs b/chain/src/tests/reward.rs index 97f1064f23..7038aca0d8 100644 --- a/chain/src/tests/reward.rs +++ b/chain/src/tests/reward.rs @@ -84,7 +84,7 @@ pub(crate) fn gen_block( let epoch = shared .consensus() - .next_epoch_ext(&parent_header, &shared.store().as_data_provider()) + .next_epoch_ext(parent_header, &shared.store().as_data_provider()) .unwrap() .epoch(); diff --git a/chain/src/tests/txs_verify_cache.rs b/chain/src/tests/txs_verify_cache.rs index 616c5bbc3f..7b88a42f94 100644 --- a/chain/src/tests/txs_verify_cache.rs +++ b/chain/src/tests/txs_verify_cache.rs @@ -20,7 +20,7 @@ use ckb_verification_traits::Switch; use faketime::unix_time_as_millis; use lazy_static::lazy_static; -use 
std::{convert::TryInto as _, fs::File, io::Read as _, path::Path, sync::Arc}; +use std::{fs::File, io::Read as _, path::Path, sync::Arc}; use crate::{ chain::{ChainController, ChainService}, diff --git a/chain/src/tests/util.rs b/chain/src/tests/util.rs index 0dad1e30f8..253793a670 100644 --- a/chain/src/tests/util.rs +++ b/chain/src/tests/util.rs @@ -343,7 +343,7 @@ impl<'a> MockChain<'a> { pub fn gen_block_with_proposal_txs(&mut self, txs: Vec, store: &MockStore) { let parent = self.tip_header(); let cellbase = create_cellbase(store, self.consensus, &parent); - let dao = dao_data(&self.consensus, &parent, &[cellbase.clone()], store, false); + let dao = dao_data(self.consensus, &parent, &[cellbase.clone()], store, false); let epoch = self .consensus @@ -372,7 +372,7 @@ impl<'a> MockChain<'a> { ) { let parent = self.tip_header(); let cellbase = create_cellbase(store, self.consensus, &parent); - let dao = dao_data(&self.consensus, &parent, &[cellbase.clone()], store, false); + let dao = dao_data(self.consensus, &parent, &[cellbase.clone()], store, false); let new_block = BlockBuilder::default() .parent_hash(parent.hash()) @@ -389,7 +389,7 @@ impl<'a> MockChain<'a> { pub fn gen_empty_block_with_diff(&mut self, difficulty: u64, store: &MockStore) { let parent = self.tip_header(); let cellbase = create_cellbase(store, self.consensus, &parent); - let dao = dao_data(&self.consensus, &parent, &[cellbase.clone()], store, false); + let dao = dao_data(self.consensus, &parent, &[cellbase.clone()], store, false); let new_block = BlockBuilder::default() .parent_hash(parent.hash()) @@ -406,7 +406,7 @@ impl<'a> MockChain<'a> { let difficulty = self.difficulty(); let parent = self.tip_header(); let cellbase = create_cellbase(store, self.consensus, &parent); - let dao = dao_data(&self.consensus, &parent, &[cellbase.clone()], store, false); + let dao = dao_data(self.consensus, &parent, &[cellbase.clone()], store, false); let new_block = BlockBuilder::default() 
.parent_hash(parent.hash()) @@ -422,7 +422,7 @@ impl<'a> MockChain<'a> { pub fn gen_empty_block_with_nonce(&mut self, nonce: u128, store: &MockStore) { let parent = self.tip_header(); let cellbase = create_cellbase(store, self.consensus, &parent); - let dao = dao_data(&self.consensus, &parent, &[cellbase.clone()], store, false); + let dao = dao_data(self.consensus, &parent, &[cellbase.clone()], store, false); let epoch = self .consensus @@ -446,7 +446,7 @@ impl<'a> MockChain<'a> { pub fn gen_empty_block(&mut self, store: &MockStore) { let parent = self.tip_header(); let cellbase = create_cellbase(store, self.consensus, &parent); - let dao = dao_data(&self.consensus, &parent, &[cellbase.clone()], store, false); + let dao = dao_data(self.consensus, &parent, &[cellbase.clone()], store, false); let epoch = self .consensus @@ -477,7 +477,7 @@ impl<'a> MockChain<'a> { let mut txs_to_resolve = vec![cellbase.clone()]; txs_to_resolve.extend_from_slice(&txs); let dao = dao_data( - &self.consensus, + self.consensus, &parent, &txs_to_resolve, store, @@ -558,5 +558,5 @@ pub fn dao_data( }; let data_loader = store.store().as_data_provider(); let calculator = DaoCalculator::new(consensus, &data_loader); - calculator.dao_field(&rtxs, &parent).unwrap() + calculator.dao_field(&rtxs, parent).unwrap() } diff --git a/ckb-bin/Cargo.toml b/ckb-bin/Cargo.toml index 3a0ee4ddac..6e20226bed 100644 --- a/ckb-bin/Cargo.toml +++ b/ckb-bin/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-bin" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "CKB executable." 
homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/ckb-bin/src/lib.rs b/ckb-bin/src/lib.rs index fe3eb832e9..ceef93c558 100644 --- a/ckb-bin/src/lib.rs +++ b/ckb-bin/src/lib.rs @@ -30,15 +30,15 @@ pub fn run_app(version: Version) -> Result<(), ExitCode> { let (bin_name, app_matches) = cli::get_bin_name_and_matches(&version); match app_matches.subcommand() { (cli::CMD_INIT, Some(matches)) => { - return subcommand::init(Setup::init(&matches)?); + return subcommand::init(Setup::init(matches)?); } (cli::CMD_LIST_HASHES, Some(matches)) => { - return subcommand::list_hashes(Setup::root_dir_from_matches(&matches)?, matches); + return subcommand::list_hashes(Setup::root_dir_from_matches(matches)?, matches); } (cli::CMD_PEERID, Some(matches)) => match matches.subcommand() { - (cli::CMD_GEN_SECRET, Some(matches)) => return Setup::gen(&matches), + (cli::CMD_GEN_SECRET, Some(matches)) => return Setup::gen(matches), (cli::CMD_FROM_SECRET, Some(matches)) => { - return subcommand::peer_id(Setup::peer_id(&matches)?); + return subcommand::peer_id(Setup::peer_id(matches)?); } _ => {} }, @@ -58,15 +58,15 @@ pub fn run_app(version: Version) -> Result<(), ExitCode> { raise_fd_limit(); let ret = match cmd { - cli::CMD_RUN => subcommand::run(setup.run(&matches)?, version, handle), - cli::CMD_MINER => subcommand::miner(setup.miner(&matches)?, handle), - cli::CMD_REPLAY => subcommand::replay(setup.replay(&matches)?, handle), - cli::CMD_EXPORT => subcommand::export(setup.export(&matches)?, handle), - cli::CMD_IMPORT => subcommand::import(setup.import(&matches)?, handle), - cli::CMD_STATS => subcommand::stats(setup.stats(&matches)?, handle), - cli::CMD_RESET_DATA => subcommand::reset_data(setup.reset_data(&matches)?), - cli::CMD_MIGRATE => subcommand::migrate(setup.migrate(&matches)?), - cli::CMD_DB_REPAIR => subcommand::db_repair(setup.db_repair(&matches)?), + cli::CMD_RUN => subcommand::run(setup.run(matches)?, version, 
handle), + cli::CMD_MINER => subcommand::miner(setup.miner(matches)?, handle), + cli::CMD_REPLAY => subcommand::replay(setup.replay(matches)?, handle), + cli::CMD_EXPORT => subcommand::export(setup.export(matches)?, handle), + cli::CMD_IMPORT => subcommand::import(setup.import(matches)?, handle), + cli::CMD_STATS => subcommand::stats(setup.stats(matches)?, handle), + cli::CMD_RESET_DATA => subcommand::reset_data(setup.reset_data(matches)?), + cli::CMD_MIGRATE => subcommand::migrate(setup.migrate(matches)?), + cli::CMD_DB_REPAIR => subcommand::db_repair(setup.db_repair(matches)?), _ => unreachable!(), }; diff --git a/ckb-bin/src/setup_guard.rs b/ckb-bin/src/setup_guard.rs index bca478f2f9..a51aff2a65 100644 --- a/ckb-bin/src/setup_guard.rs +++ b/ckb-bin/src/setup_guard.rs @@ -46,7 +46,7 @@ impl SetupGuard { sentry_config.dsn ); - let guard = sentry_config.init(&version); + let guard = sentry_config.init(version); sentry::configure_scope(|scope| { scope.set_tag("subcommand", &setup.subcommand_name); diff --git a/ckb-bin/src/subcommand/list_hashes.rs b/ckb-bin/src/subcommand/list_hashes.rs index 1df6833044..17888652c8 100644 --- a/ckb-bin/src/subcommand/list_hashes.rs +++ b/ckb-bin/src/subcommand/list_hashes.rs @@ -5,7 +5,6 @@ use ckb_types::{packed::CellOutput, prelude::*, H256}; use clap::ArgMatches; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; -use std::convert::{TryFrom, TryInto}; use std::path::PathBuf; #[derive(Clone, Debug, Serialize, Deserialize)] diff --git a/db-migration/Cargo.toml b/db-migration/Cargo.toml index 4dd6be3844..8fcbf37f51 100644 --- a/db-migration/Cargo.toml +++ b/db-migration/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-db-migration" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/db-migration/src/lib.rs 
b/db-migration/src/lib.rs index 662c2169a3..3f3ac818e5 100644 --- a/db-migration/src/lib.rs +++ b/db-migration/src/lib.rs @@ -155,7 +155,7 @@ impl Migrations { /// Initial db version pub fn init_db_version(&self, db: &RocksDB) -> Result<(), Error> { - let db_version = self.get_migration_version(&db)?; + let db_version = self.get_migration_version(db)?; if db_version.is_none() { if let Some(m) = self.migrations.values().last() { info!("Init database version {}", m.version()); diff --git a/db-schema/Cargo.toml b/db-schema/Cargo.toml index d6ab0f3571..dc16b98adf 100644 --- a/db-schema/Cargo.toml +++ b/db-schema/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-db-schema" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "The schema include constants define the low level database column families." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/db/Cargo.toml b/db/Cargo.toml index 6dafb7e829..33497e002d 100644 --- a/db/Cargo.toml +++ b/db/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-db" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "The ckb data persistent implementation" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/db/src/db.rs b/db/src/db.rs index a7d0a3aa2d..42575daecc 100644 --- a/db/src/db.rs +++ b/db/src/db.rs @@ -57,7 +57,7 @@ impl RocksDB { let opts = Options::default(); let cf_descriptors: Vec<_> = cf_names .iter() - .map(|ref c| ColumnFamilyDescriptor::new(*c, Options::default())) + .map(|c| ColumnFamilyDescriptor::new(c, Options::default())) .collect(); (opts, cf_descriptors) }; @@ -285,7 +285,7 @@ impl RocksDB { /// CompactRange waits while compaction is performed on the background threads and thus is a blocking call. 
pub fn compact_range(&self, col: Col, start: Option<&[u8]>, end: Option<&[u8]>) -> Result<()> { let cf = cf_handle(&self.inner, col)?; - self.inner.compact_range_cf(&cf, start, end); + self.inner.compact_range_cf(cf, start, end); Ok(()) } diff --git a/devtools/doc/rpc.py b/devtools/doc/rpc.py index 01816a7f2f..a7f3bad8dd 100755 --- a/devtools/doc/rpc.py +++ b/devtools/doc/rpc.py @@ -38,7 +38,7 @@ ## Minimum Supported Rust Version policy (MSRV) -The crate `ckb-rpc`'s minimum supported rustc version is 1.51.0. +The crate `ckb-rpc`'s minimum supported rustc version is 1.56.1. """ @@ -46,6 +46,7 @@ TYMETHOD_DOT = 'tymethod.' HREF_PREFIX_RPCERROR = '../enum.RPCError.html#variant.' +RUST_DOC_PREFIX = 'https://doc.rust-lang.org/1.56.1' NAME_PREFIX_SELF = '(&self, ' @@ -247,35 +248,56 @@ def handle_starttag(self, tag, attrs): self.ty = None return - if self.ty == 'https://doc.rust-lang.org/nightly/std/primitive.unit.html': + if self.ty == RUST_DOC_PREFIX + '/std/primitive.unit.html' : self.ty = '`null`' - if self.ty == 'https://doc.rust-lang.org/nightly/std/primitive.bool.html': + if self.ty == RUST_DOC_PREFIX + '/std/primitive.bool.html': self.ty = '`boolean`' - if self.ty == 'https://doc.rust-lang.org/nightly/alloc/string/struct.String.html': + if self.ty == RUST_DOC_PREFIX + '/alloc/string/struct.String.html': self.ty = '`string`' - elif self.ty == 'https://doc.rust-lang.org/nightly/core/option/enum.Option.html': + elif self.ty == RUST_DOC_PREFIX + '/core/option/enum.Option.html': self.require_children(1) - elif self.ty == 'https://doc.rust-lang.org/nightly/alloc/vec/struct.Vec.html': + elif self.ty == RUST_DOC_PREFIX + '/alloc/vec/struct.Vec.html': self.require_children(1) - elif self.ty == 'https://doc.rust-lang.org/nightly/std/collections/hash/map/struct.HashMap.html': + elif self.ty == RUST_DOC_PREFIX + '/std/collections/hash/map/struct.HashMap.html': self.require_children(2) elif self.ty == '../../ckb_jsonrpc_types/enum.ResponseFormat.html': 
self.require_children(2) - elif self.ty.startswith('../') and '/struct.' in self.ty: - PENDING_TYPES.add(self.ty) - type_name = self.ty.split('/struct.')[1][:-5] - self.ty = '[`{}`](#type-{})'.format(type_name, - type_name.lower()) - elif self.ty.startswith('../') and '/type.' in self.ty: - PENDING_TYPES.add(self.ty) - type_name = self.ty.split('/type.')[1][:-5] - self.ty = '[`{}`](#type-{})'.format(type_name, - type_name.lower()) - elif self.ty.startswith('../') and '/enum.' in self.ty: - PENDING_TYPES.add(self.ty) - type_name = self.ty.split('/enum.')[1][:-5] - self.ty = '[`{}`](#type-{})'.format(type_name, - type_name.lower()) + elif self.ty.startswith('../'): + if '/struct.' in self.ty: + PENDING_TYPES.add(self.ty) + type_name = self.ty.split('/struct.')[1][:-5] + self.ty = '[`{}`](#type-{})'.format(type_name, + type_name.lower()) + elif '/type.' in self.ty: + PENDING_TYPES.add(self.ty) + type_name = self.ty.split('/type.')[1][:-5] + self.ty = '[`{}`](#type-{})'.format(type_name, + type_name.lower()) + elif '/enum.' 
in self.ty: + PENDING_TYPES.add(self.ty) + type_name = self.ty.split('/enum.')[1][:-5] + self.ty = '[`{}`](#type-{})'.format(type_name, + type_name.lower()) + + # after 1.56 rustdoc change relative link + # now relative link do not start with '../' + elif 'title' in attrs_dict and 'ckb_jsonrpc_types::' in attrs_dict['title']: + if ('class', 'struct') in attrs and attrs_dict['title'].startswith('struct') and self.ty.startswith('struct.'): + type_name = self.ty.split('struct.')[1][:-5] + PENDING_TYPES.add('ckb_jsonrpc_types/' + self.ty) + self.ty = '[`{}`](#type-{})'.format(type_name, + type_name.lower()) + elif ('class', 'type') in attrs and attrs_dict['title'].startswith('type') and self.ty.startswith('type.'): + type_name = self.ty.split('type.')[1][:-5] + PENDING_TYPES.add('ckb_jsonrpc_types/' + self.ty) + self.ty = '[`{}`](#type-{})'.format(type_name, + type_name.lower()) + elif ('class', 'enum') in attrs and attrs_dict['title'].startswith('enum') and self.ty.startswith('enum.'): + type_name = self.ty.split('enum.')[1][:-5] + PENDING_TYPES.add('ckb_jsonrpc_types/' + self.ty) + self.ty = '[`{}`](#type-{})'.format(type_name, + type_name.lower()) + else: if self.completed_children >= len(self.children): print(">>> {} {}[{}] => {} {} {}".format( @@ -283,11 +305,11 @@ def handle_starttag(self, tag, attrs): self.children[self.completed_children].handle_starttag(tag, attrs) if self.children[self.completed_children].completed(): if self.completed(): - if self.ty == 'https://doc.rust-lang.org/nightly/core/option/enum.Option.html': + if self.ty == RUST_DOC_PREFIX + '/core/option/enum.Option.html': self.ty = '{} `|` `null`'.format(self.children[0].ty) - elif self.ty == 'https://doc.rust-lang.org/nightly/alloc/vec/struct.Vec.html': + elif self.ty == RUST_DOC_PREFIX + '/alloc/vec/struct.Vec.html': self.ty = '`Array<` {} `>`'.format(self.children[0].ty) - elif self.ty == 'https://doc.rust-lang.org/nightly/std/collections/hash/map/struct.HashMap.html': + elif self.ty == 
RUST_DOC_PREFIX + '/std/collections/hash/map/struct.HashMap.html': self.ty = '`{{ [ key:` {} `]: ` {} `}}`'.format( self.children[0].ty, self.children[1].ty) elif self.ty == '../../ckb_jsonrpc_types/enum.ResponseFormat.html': @@ -394,7 +416,7 @@ def handle_starttag(self, tag, attrs): if self.doc_parser is None and tag == 'div' and attrs == [("class", "docblock")]: self.active_parser = self.doc_parser = MarkdownParser( title_level=3) - elif tag == 'h3' and ('class', 'method') in attrs: + elif tag == 'div' and ('class', 'method has-srclink') in attrs: id = dict(attrs)['id'] if id.startswith(TYMETHOD_DOT): self.active_parser = RPCMethod(id[len(TYMETHOD_DOT):]) @@ -616,7 +638,7 @@ def __init__(self, name, path): if '/enum.' in path and self.name != 'RawTxPool': self.schema = EnumSchema(self.name) - elif '/struct.' in path: + elif '/struct.' in path and self.name != 'ProposalShortId': self.schema = StructSchema(self.name) else: self.schema = None diff --git a/error/Cargo.toml b/error/Cargo.toml index 74618acefa..2bc08ffee0 100644 --- a/error/Cargo.toml +++ b/error/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-error" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "Underlying error types used over ckb crates" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/freezer/Cargo.toml b/freezer/Cargo.toml index aaf8c72f61..8c61773d26 100644 --- a/freezer/Cargo.toml +++ b/freezer/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-freezer" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "Freezer is an memory mapped append-only database to store immutable chain data into flat files" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/freezer/src/freezer_files.rs b/freezer/src/freezer_files.rs index a446b53251..e3732f3641 100644 --- 
a/freezer/src/freezer_files.rs +++ b/freezer/src/freezer_files.rs @@ -2,7 +2,6 @@ use ckb_metrics::metrics; use fail::fail_point; use lru::LruCache; use snap::raw::{Decoder as SnappyDecoder, Encoder as SnappyEncoder}; -use std::convert::TryInto; use std::fs::{self, File}; use std::io::{Error as IoError, ErrorKind as IoErrorKind}; use std::io::{Read, Write}; diff --git a/miner/Cargo.toml b/miner/Cargo.toml index 58d524ece5..1a9750a55f 100644 --- a/miner/Cargo.toml +++ b/miner/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-miner" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/miner/src/worker/dummy.rs b/miner/src/worker/dummy.rs index 8ffcf1890d..6bf9f8f326 100644 --- a/miner/src/worker/dummy.rs +++ b/miner/src/worker/dummy.rs @@ -7,7 +7,6 @@ use ckb_types::packed::Byte32; use indicatif::ProgressBar; use rand::thread_rng; use rand_distr::{self as dist, Distribution as _}; -use std::convert::TryFrom; use std::thread; use std::time::Duration; diff --git a/network/Cargo.toml b/network/Cargo.toml index 8f2db2b5ac..42d53885f0 100644 --- a/network/Cargo.toml +++ b/network/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-network" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "ckb network implementation" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/network/src/compress.rs b/network/src/compress.rs index 12a935cf51..6193d1dce7 100644 --- a/network/src/compress.rs +++ b/network/src/compress.rs @@ -1,3 +1,5 @@ +//!ckb network compress module + use ckb_logger::debug; use p2p::bytes::{BufMut, Bytes, BytesMut}; use snap::raw::{decompress_len, Decoder as SnapDecoder, Encoder as SnapEncoder}; diff --git a/network/src/network.rs 
b/network/src/network.rs index 665261f302..ce5bfb712d 100644 --- a/network/src/network.rs +++ b/network/src/network.rs @@ -320,7 +320,7 @@ impl NetworkState { trace!("Do not dial self: {:?}, {}", peer_id, addr); return false; } - if self.public_addrs.read().contains(&addr) { + if self.public_addrs.read().contains(addr) { trace!( "Do not dial listened address(self): {:?}, {}", peer_id, @@ -582,7 +582,7 @@ impl ServiceHandle for EventHandler { let message = format!("ProtocolError id={}", proto_id); // Ban because misbehave of remote peer self.network_state.ban_session( - &context.control(), + context.control(), id, Duration::from_secs(300), message, @@ -624,7 +624,7 @@ impl ServiceHandle for EventHandler { if let ProtocolHandleErrorKind::AbnormallyClosed(opt_session_id) = error { if let Some(id) = opt_session_id { self.network_state.ban_session( - &context.control(), + context.control(), id, Duration::from_secs(300), format!("protocol {} panic when process peer message", proto_id), @@ -1265,7 +1265,6 @@ impl NetworkController { let now = Instant::now(); loop { let target = target - .clone() .map(TargetSession::Single) .unwrap_or(TargetSession::All); let result = if quick { diff --git a/network/src/network_group.rs b/network/src/network_group.rs index 74a05c2613..a77b468462 100644 --- a/network/src/network_group.rs +++ b/network/src/network_group.rs @@ -3,7 +3,7 @@ use std::net::IpAddr; #[derive(Hash, Eq, PartialEq, Debug)] pub enum Group { - NoGroup, + None, LocalNetwork, IP4([u8; 2]), IP6([u8; 4]), @@ -38,6 +38,6 @@ impl From<&Multiaddr> for Group { } } // Can't group addr - Group::NoGroup + Group::None } } diff --git a/network/src/peer_store/addr_manager.rs b/network/src/peer_store/addr_manager.rs index f6f5a44070..1ddd6e347c 100644 --- a/network/src/peer_store/addr_manager.rs +++ b/network/src/peer_store/addr_manager.rs @@ -102,7 +102,7 @@ impl AddrManager { multiaddr_to_socketaddr(addr).and_then(|addr| { self.addr_to_id .get(&addr) - .and_then(|id| 
self.id_to_info.get(&id)) + .and_then(|id| self.id_to_info.get(id)) }) } diff --git a/network/src/peer_store/ban_list.rs b/network/src/peer_store/ban_list.rs index bcdbd07ad6..29d860fe78 100644 --- a/network/src/peer_store/ban_list.rs +++ b/network/src/peer_store/ban_list.rs @@ -42,7 +42,7 @@ impl BanList { /// Unban address pub fn unban_network(&mut self, ip_network: &IpNetwork) { - self.inner.remove(&ip_network); + self.inner.remove(ip_network); } fn is_ip_banned_until(&self, ip: IpAddr, now_ms: u64) -> bool { diff --git a/network/src/peer_store/peer_store_impl.rs b/network/src/peer_store/peer_store_impl.rs index 9333d32295..19dec04c97 100644 --- a/network/src/peer_store/peer_store_impl.rs +++ b/network/src/peer_store/peer_store_impl.rs @@ -250,7 +250,7 @@ impl PeerStore { .collect(); for key in candidate_peers.iter() { - self.addr_manager.remove(&key); + self.addr_manager.remove(key); } if candidate_peers.is_empty() { @@ -292,7 +292,7 @@ impl PeerStore { }; for key in candidate_peers.iter() { - self.addr_manager.remove(&key); + self.addr_manager.remove(key); } if candidate_peers.is_empty() { diff --git a/network/src/protocols/discovery/mod.rs b/network/src/protocols/discovery/mod.rs index 6cb0978336..556869e82e 100644 --- a/network/src/protocols/discovery/mod.rs +++ b/network/src/protocols/discovery/mod.rs @@ -97,7 +97,7 @@ impl ServiceProtocol for DiscoveryProtocol { let mgr = &mut self.addr_mgr; let mut check = |behavior: Misbehavior| -> bool { - if mgr.misbehave(&session, &behavior).is_disconnect() { + if mgr.misbehave(session, &behavior).is_disconnect() { if context.disconnect(session.id).is_err() { error!("disconnect {:?} send fail", session.id) } @@ -184,9 +184,7 @@ impl ServiceProtocol for DiscoveryProtocol { if let Some(state) = self.sessions.get_mut(&session.id) { if !nodes.announce && state.received_nodes { warn!("already received Nodes(announce=false) message"); - if check(Misbehavior::DuplicateFirstNodes) { - return; - } + 
check(Misbehavior::DuplicateFirstNodes); } else { let addrs = nodes .items @@ -211,7 +209,7 @@ impl ServiceProtocol for DiscoveryProtocol { None => { if self .addr_mgr - .misbehave(&session, &Misbehavior::InvalidData) + .misbehave(session, &Misbehavior::InvalidData) .is_disconnect() && context.disconnect(session.id).is_err() { @@ -324,7 +322,7 @@ impl AddressManager for DiscoveryAddressManager { fn is_valid_addr(&self, addr: &Multiaddr) -> bool { if !self.discovery_local_address { - let local_or_invalid = multiaddr_to_socketaddr(&addr) + let local_or_invalid = multiaddr_to_socketaddr(addr) .map(|socket_addr| !is_reachable(socket_addr.ip())) .unwrap_or(true); !local_or_invalid diff --git a/network/src/protocols/discovery/protocol.rs b/network/src/protocols/discovery/protocol.rs index 41aeab3c1f..2b77a2f1ce 100644 --- a/network/src/protocols/discovery/protocol.rs +++ b/network/src/protocols/discovery/protocol.rs @@ -1,5 +1,3 @@ -use std::convert::TryFrom; - use p2p::{ bytes::{Bytes, BytesMut}, multiaddr::Multiaddr, @@ -27,7 +25,7 @@ pub(crate) fn encode(data: DiscoveryMessage, v2: bool) -> Bytes { pub(crate) fn decode(data: &Bytes, v2: bool) -> Option { if v2 { - DiscoveryMessage::decode(&data) + DiscoveryMessage::decode(data) } else { let mut data = BytesMut::from(data.as_ref()); // Length Delimited Codec is not a mandatory requirement. 
diff --git a/network/src/protocols/identify/mod.rs b/network/src/protocols/identify/mod.rs index efc87bbbf7..cbc8cf4835 100644 --- a/network/src/protocols/identify/mod.rs +++ b/network/src/protocols/identify/mod.rs @@ -144,7 +144,7 @@ impl IdentifyProtocol { }) .collect::>(); self.callback - .add_remote_listen_addrs(&session, reachable_addrs); + .add_remote_listen_addrs(session, reachable_addrs); MisbehaveResult::Continue } } diff --git a/network/src/protocols/identify/protocol.rs b/network/src/protocols/identify/protocol.rs index 189e66db63..aa9cf70eff 100644 --- a/network/src/protocols/identify/protocol.rs +++ b/network/src/protocols/identify/protocol.rs @@ -1,7 +1,6 @@ use p2p::{bytes::Bytes, multiaddr::Multiaddr}; use ckb_types::{packed, prelude::*}; -use std::convert::TryFrom; #[derive(Clone, PartialEq, Eq, Debug)] pub struct IdentifyMessage<'a> { diff --git a/network/src/services/dns_seeding/mod.rs b/network/src/services/dns_seeding/mod.rs index 454da891e1..845760bd9a 100644 --- a/network/src/services/dns_seeding/mod.rs +++ b/network/src/services/dns_seeding/mod.rs @@ -77,7 +77,7 @@ impl DnsSeedingService { for inner in record.iter() { match std::str::from_utf8(inner) { Ok(record) => { - match SeedRecord::decode_with_pubkey(&record, &pubkey) { + match SeedRecord::decode_with_pubkey(record, &pubkey) { Ok(seed_record) => { let address = seed_record.address(); trace!("got dns txt address: {}", address); diff --git a/notify/Cargo.toml b/notify/Cargo.toml index b65efa20e5..9c1593fce0 100644 --- a/notify/Cargo.toml +++ b/notify/Cargo.toml @@ -2,7 +2,7 @@ name = "ckb-notify" version = "0.102.0-pre" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" license = "MIT" description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" diff --git a/pow/Cargo.toml b/pow/Cargo.toml index 681333f908..1dd0f530f9 100644 --- a/pow/Cargo.toml +++ b/pow/Cargo.toml @@ -2,7 +2,7 @@ name = "ckb-pow" version = "0.102.0-pre" authors = 
["Nervos Core Dev "] -edition = "2018" +edition = "2021" license = "MIT" description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" diff --git a/resource/Cargo.toml b/resource/Cargo.toml index bf00d2a985..bb1877433c 100644 --- a/resource/Cargo.toml +++ b/resource/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-resource" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" build = "build.rs" description = "Bundled resources for the CKB binary." homepage = "https://github.com/nervosnetwork/ckb" diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 973ec18d64..71a3141318 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-rpc" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "CKB RPC server." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/rpc/README.md b/rpc/README.md index 109a314329..562a7664ba 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -24,7 +24,7 @@ For example, a method is marked as deprecated in 0.35.0, it can be disabled in 0 ## Minimum Supported Rust Version policy (MSRV) -The crate `ckb-rpc`'s minimum supported rustc version is 1.51.0. +The crate `ckb-rpc`'s minimum supported rustc version is 1.56.1. ## Table of Contents @@ -188,6 +188,7 @@ This RPC returns `null` on success. Request + ``` { "jsonrpc": "2.0", @@ -209,8 +210,10 @@ Request } ``` + Response + ``` { "error": { @@ -224,6 +227,7 @@ Response } ``` + ### Module Chain RPC Module Chain for methods related to the canonical chain. @@ -274,6 +278,7 @@ When `verbosity` is 0, it returns a 0x-prefixed hex string as the `result`. The Request + ``` { "id": 42, @@ -285,8 +290,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -346,8 +353,10 @@ Response } ``` + The response looks like below when `verbosity` is 0. 
+ ``` { "id": 42, @@ -356,6 +365,7 @@ The response looks like below when `verbosity` is 0. } ``` + #### Method `get_block_by_number` * `get_block_by_number(block_number, verbosity)` * `block_number`: [`BlockNumber`](#type-blocknumber) @@ -390,6 +400,7 @@ When `verbosity` is 0, it returns a 0x-prefixed hex string as the `result`. The Request + ``` { "id": 42, @@ -401,8 +412,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -462,8 +475,10 @@ Response } ``` + The response looks like below when `verbosity` is 0. + ``` { "id": 42, @@ -472,6 +487,7 @@ The response looks like below when `verbosity` is 0. } ``` + #### Method `get_header` * `get_header(block_hash, verbosity)` * `block_hash`: [`H256`](#type-h256) @@ -500,6 +516,7 @@ When `verbosity` is 0, it returns a 0x-prefixed hex string as the `result`. The Request + ``` { "id": 42, @@ -511,8 +528,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -534,8 +553,10 @@ Response } ``` + The response looks like below when `verbosity` is 0. + ``` { "id": 42, @@ -544,6 +565,7 @@ The response looks like below when `verbosity` is 0. } ``` + #### Method `get_header_by_number` * `get_header_by_number(block_number, verbosity)` * `block_number`: [`BlockNumber`](#type-blocknumber) @@ -576,6 +598,7 @@ When `verbosity` is 0, it returns a 0x-prefixed hex string as the `result`. The Request + ``` { "id": 42, @@ -587,8 +610,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -610,8 +635,10 @@ Response } ``` + The response looks like below when `verbosity` is 0. + ``` { "id": 42, @@ -620,6 +647,7 @@ The response looks like below when `verbosity` is 0. 
} ``` + #### Method `get_transaction` * `get_transaction(tx_hash, verbosity)` * `tx_hash`: [`H256`](#type-h256) @@ -652,6 +680,7 @@ When verbosity is 2: if tx_status.status is pending, proposed, or committed, the Request + ``` { "id": 42, @@ -663,8 +692,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -719,6 +750,7 @@ Response } ``` + #### Method `get_block_hash` * `get_block_hash(block_number)` * `block_number`: [`BlockNumber`](#type-blocknumber) @@ -740,6 +772,7 @@ Because of [chain reorganization](#chain-reorganization), the PRC may return nul Request + ``` { "id": 42, @@ -751,8 +784,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -761,6 +796,7 @@ Response } ``` + #### Method `get_tip_header` * `get_tip_header(verbosity)` * `verbosity`: [`Uint32`](#type-uint32) `|` `null` @@ -784,6 +820,7 @@ When `verbosity` is 0, it returns a 0x-prefixed hex string as the `result`. The Request + ``` { "id": 42, @@ -793,8 +830,10 @@ Request } ``` + Response + ``` { "jsonrpc": "2.0", @@ -816,8 +855,10 @@ Response } ``` + The response looks like below when `verbosity` is 0. + ``` { "id": 42, @@ -826,6 +867,7 @@ The response looks like below when `verbosity` is 0. 
} ``` + #### Method `get_live_cell` * `get_live_cell(out_point, with_data)` * `out_point`: [`OutPoint`](#type-outpoint) @@ -852,6 +894,7 @@ If the cell is live and `with_data` is set to `false`, the field `cell.data` is Request + ``` { "id": 42, @@ -867,8 +910,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -894,6 +939,7 @@ Response } ``` + #### Method `get_tip_block_number` * `get_tip_block_number()` * result: [`BlockNumber`](#type-blocknumber) @@ -906,6 +952,7 @@ Because of [chain reorganization](#chain-reorganization), the returned block num Request + ``` { "id": 42, @@ -915,8 +962,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -925,6 +974,7 @@ Response } ``` + #### Method `get_current_epoch` * `get_current_epoch()` * result: [`EpochView`](#type-epochview) @@ -937,6 +987,7 @@ Pay attention that like blocks with the specific block number may change because Request + ``` { "id": 42, @@ -946,8 +997,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -961,6 +1014,7 @@ Response } ``` + #### Method `get_epoch_by_number` * `get_epoch_by_number(epoch_number)` * `epoch_number`: [`EpochNumber`](#type-epochnumber) @@ -982,6 +1036,7 @@ Because of [chain reorganization](#chain-reorganization), for the same `epoch_nu Request + ``` { "id": 42, @@ -993,8 +1048,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1008,6 +1065,7 @@ Response } ``` + #### Method `get_block_economic_state` * `get_block_economic_state(block_hash)` * `block_hash`: [`H256`](#type-h256) @@ -1035,6 +1093,7 @@ If the block with the hash `block_hash` is in the [canonical chain](#canonical-c Request + ``` { "id": 42, @@ -1046,8 +1105,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1069,6 +1130,7 @@ Response } ``` + #### Method `get_transaction_proof` * `get_transaction_proof(tx_hashes, block_hash)` * `tx_hashes`: `Array<` [`H256`](#type-h256) `>` @@ -1087,6 +1149,7 @@ Returns a Merkle proof that transactions are included in a block. 
Request + ``` { "id": 42, @@ -1098,8 +1161,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1115,6 +1180,7 @@ Response } ``` + #### Method `verify_transaction_proof` * `verify_transaction_proof(tx_proof)` * `tx_proof`: [`TransactionProof`](#type-transactionproof) @@ -1130,6 +1196,7 @@ Verifies that a proof points to transactions in a block, returning the transacti Request + ``` { "id": 42, @@ -1148,8 +1215,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1160,6 +1229,7 @@ Response } ``` + #### Method `get_fork_block` * `get_fork_block(block_hash, verbosity)` * `block_hash`: [`H256`](#type-h256) @@ -1188,6 +1258,7 @@ When `verbosity` is 0, it returns a 0x-prefixed hex string as the `result`. The Request + ``` { "id": 42, @@ -1199,8 +1270,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1260,8 +1333,10 @@ Response } ``` + The response looks like below when `verbosity` is 0. + ``` { "id": 42, @@ -1270,6 +1345,7 @@ The response looks like below when `verbosity` is 0. } ``` + #### Method `get_consensus` * `get_consensus()` * result: [`Consensus`](#type-consensus) @@ -1284,6 +1360,7 @@ If any hardfork feature has `epoch=null`, it means the feature will never be act Request + ``` { "id": 42, @@ -1293,8 +1370,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1344,6 +1423,7 @@ Response } ``` + #### Method `get_block_median_time` * `get_block_median_time(block_hash)` * `block_hash`: [`H256`](#type-h256) @@ -1365,6 +1445,7 @@ Note that the given block is included in the median time. The included block num Request + ``` { "id": 42, @@ -1376,8 +1457,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1386,6 +1469,7 @@ Response } ``` + ### Module Experiment RPC Module Experiment for experimenting methods. 
@@ -1415,6 +1499,7 @@ It is used to debug transaction scripts and query how many cycles the scripts co Request + ``` { "id": 42, @@ -1464,8 +1549,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1476,6 +1563,7 @@ Response } ``` + #### Method `calculate_dao_maximum_withdraw` * `calculate_dao_maximum_withdraw(out_point, kind)` * `out_point`: [`OutPoint`](#type-outpoint) @@ -1486,13 +1574,13 @@ Calculates the maximum withdrawal one can get, given a referenced DAO cell, and ##### Params -* `out_point` - Reference to the DAO cell, the depositing transaction's output. +* `out_point` - Reference to the DAO cell, the depositing transaction’s output. * `kind` - Two kinds of dao withdrawal amount calculation option. -option 1, the assumed reference block hash for withdrawing phase 1 transaction, this block must be in the [canonical chain](#canonical-chain), the calculation of occupied capacity will be based on the depositing transaction's output, assuming the output of phase 1 transaction is the same as the depositing transaction's output. +option 1, the assumed reference block hash for withdrawing phase 1 transaction, this block must be in the [canonical chain](#canonical-chain), the calculation of occupied capacity will be based on the depositing transaction’s output, assuming the output of phase 1 transaction is the same as the depositing transaction’s output. -option 2, the out point of the withdrawing phase 1 transaction, the calculation of occupied capacity will be based on corresponding phase 1 transaction's output. +option 2, the out point of the withdrawing phase 1 transaction, the calculation of occupied capacity will be based on corresponding phase 1 transaction’s output. ##### Returns @@ -1510,6 +1598,7 @@ In CKB, scripts cannot get the information about in which block the transaction Request + ``` { "id": 42, @@ -1525,8 +1614,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1535,6 +1626,7 @@ Response } ``` + ### Module Miner RPC Module Miner for miners. 
@@ -1564,6 +1656,7 @@ Miners can assemble the new block from the template. The RPC is designed to allo Request + ``` { "id": 42, @@ -1577,8 +1670,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1657,6 +1752,7 @@ Response } ``` + #### Method `submit_block` * `submit_block(work_id, block)` * `work_id`: `string` @@ -1675,6 +1771,7 @@ Submit new block to the network. Request + ``` { "id": 42, @@ -1736,8 +1833,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1746,6 +1845,7 @@ Response } ``` + ### Module Net RPC Module Net for P2P network. @@ -1762,6 +1862,7 @@ The local node means the node itself which is serving the RPC. Request + ``` { "id": 42, @@ -1771,8 +1872,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1812,16 +1915,18 @@ Response } ``` + #### Method `get_peers` * `get_peers()` * result: `Array<` [`RemoteNode`](#type-remotenode) `>` -Returns the connected peers' information. +Returns the connected peers’ information. ##### Examples Request + ``` { "id": 42, @@ -1831,8 +1936,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1958,6 +2065,7 @@ Response } ``` + #### Method `get_banned_addresses` * `get_banned_addresses()` * result: `Array<` [`BannedAddr`](#type-bannedaddr) `>` @@ -1968,6 +2076,7 @@ Returns all banned IPs/Subnets. Request + ``` { "id": 42, @@ -1977,8 +2086,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -1994,6 +2105,7 @@ Response } ``` + #### Method `clear_banned_addresses` * `clear_banned_addresses()` * result: `null` @@ -2004,6 +2116,7 @@ Clears all banned IPs/Subnets. Request + ``` { "id": 42, @@ -2013,8 +2126,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2023,6 +2138,7 @@ Response } ``` + #### Method `set_ban` * `set_ban(address, command, ban_time, absolute, reason)` * `address`: `string` @@ -2037,9 +2153,9 @@ Inserts or deletes an IP/Subnet from the banned list ##### Params * `address` - The IP/Subnet with an optional netmask (default is /32 = single IP). 
Examples: - * "192.168.0.2" bans a single IP + * “192.168.0.2” bans a single IP - * "192.168.0.0/24" bans IP from "192.168.0.0" to "192.168.0.255". + * “192.168.0.0/24” bans IP from “192.168.0.0” to “192.168.0.255”. * `command` - `insert` to insert an IP/Subnet to the list, `delete` to delete an IP/Subnet from the list. @@ -2062,6 +2178,7 @@ Inserts or deletes an IP/Subnet from the banned list Request + ``` { "id": 42, @@ -2077,8 +2194,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2087,6 +2206,7 @@ Response } ``` + #### Method `sync_state` * `sync_state()` * result: [`SyncState`](#type-syncstate) @@ -2097,6 +2217,7 @@ Returns chain synchronization state of this node. Request + ``` { "id": 42, @@ -2106,8 +2227,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2125,6 +2248,7 @@ Response } ``` + #### Method `set_network_active` * `set_network_active(state)` * `state`: `boolean` @@ -2140,6 +2264,7 @@ Disable/enable all p2p network activity Request + ``` { "id": 42, @@ -2151,8 +2276,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2161,6 +2288,7 @@ Response } ``` + #### Method `add_node` * `add_node(peer_id, address)` * `peer_id`: `string` @@ -2177,13 +2305,16 @@ Attempts to add a node to the peers list and try connecting to it. 
The full P2P address is usually displayed as `address/peer_id`, for example in the log + ``` 2020-09-16 15:31:35.191 +08:00 NetworkRuntime INFO ckb_network::network Listen on address: /ip4/192.168.2.100/tcp/8114/QmUsZHPbjjzU627UZFt4k8j6ycEcNvXRnVGxCPKqwbAfQS ``` + And in RPC `local_node_info`: + ``` { "addresses": [ @@ -2195,6 +2326,7 @@ And in RPC `local_node_info`: } ``` + In both of these examples, * `peer_id` is `QmUsZHPbjjzU627UZFt4k8j6ycEcNvXRnVGxCPKqwbAfQS`, @@ -2205,6 +2337,7 @@ In both of these examples, Request + ``` { "id": 42, @@ -2217,8 +2350,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2227,6 +2362,7 @@ Response } ``` + #### Method `remove_node` * `remove_node(peer_id)` * `peer_id`: `string` @@ -2238,12 +2374,13 @@ Attempts to remove a node from the peers list and try disconnecting from it. * `peer_id` - The peer id of the node. -This is the last part of a full P2P address. For example, in address "/ip4/192.168.2.100/tcp/8114/QmUsZHPbjjzU627UZFt4k8j6ycEcNvXRnVGxCPKqwbAfQS", the `peer_id` is `QmUsZHPbjjzU627UZFt4k8j6ycEcNvXRnVGxCPKqwbAfQS`. +This is the last part of a full P2P address. For example, in address “/ip4/192.168.2.100/tcp/8114/QmUsZHPbjjzU627UZFt4k8j6ycEcNvXRnVGxCPKqwbAfQS”, the `peer_id` is `QmUsZHPbjjzU627UZFt4k8j6ycEcNvXRnVGxCPKqwbAfQS`. ##### Examples Request + ``` { "id": 42, @@ -2255,8 +2392,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2265,6 +2404,7 @@ Response } ``` + #### Method `ping_peers` * `ping_peers()` * result: `null` @@ -2275,6 +2415,7 @@ Requests that a ping is sent to all connected peers, to measure ping time. Requests + ``` { "id": 42, @@ -2284,8 +2425,10 @@ Requests } ``` + Response + ``` { "id": 42, @@ -2294,6 +2437,7 @@ Response } ``` + ### Module Pool RPC Module Pool for transaction memory pool. @@ -2310,11 +2454,11 @@ Submits a new transaction into the transaction pool. If the transaction is alrea * `transaction` - The transaction. 
-* `outputs_validator` - Validates the transaction outputs before entering the tx-pool. (**Optional**, default is "well_known_scripts_only"). +* `outputs_validator` - Validates the transaction outputs before entering the tx-pool. (**Optional**, default is “well_known_scripts_only”). ##### Errors -* [`PoolRejectedTransactionByOutputsValidator (-1102)`](#error-poolrejectedtransactionbyoutputsvalidator) - The transaction is rejected by the validator specified by `outputs_validator`. If you really want to send transactions with advanced scripts, please set `outputs_validator` to "passthrough". +* [`PoolRejectedTransactionByOutputsValidator (-1102)`](#error-poolrejectedtransactionbyoutputsvalidator) - The transaction is rejected by the validator specified by `outputs_validator`. If you really want to send transactions with advanced scripts, please set `outputs_validator` to “passthrough”. * [`PoolRejectedTransactionByIllTransactionChecker (-1103)`](#error-poolrejectedtransactionbyilltransactionchecker) - Pool rejects some transactions which seem contain invalid VM instructions. See the issue link in the error message for details. @@ -2334,6 +2478,7 @@ Submits a new transaction into the transaction pool. If the transaction is alrea Request + ``` { "id": 42, @@ -2384,8 +2529,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2394,6 +2541,7 @@ Response } ``` + #### Method `remove_transaction` * `remove_transaction(tx_hash)` * `tx_hash`: [`H256`](#type-h256) @@ -2413,6 +2561,7 @@ If the transaction exists, return true; otherwise, return false. Request + ``` { "id": 42, @@ -2424,8 +2573,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2434,6 +2585,7 @@ Response } ``` + #### Method `tx_pool_info` * `tx_pool_info()` * result: [`TxPoolInfo`](#type-txpoolinfo) @@ -2444,6 +2596,7 @@ Returns the transaction pool information. 
Request + ``` { "id": 42, @@ -2453,8 +2606,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2473,6 +2628,7 @@ Response } ``` + #### Method `clear_tx_pool` * `clear_tx_pool()` * result: `null` @@ -2483,6 +2639,7 @@ Removes all transactions from the transaction pool. Request + ``` { "id": 42, @@ -2492,8 +2649,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2502,6 +2661,7 @@ Response } ``` + #### Method `get_raw_tx_pool` * `get_raw_tx_pool(verbose)` * `verbose`: `boolean` `|` `null` @@ -2517,6 +2677,7 @@ Returns all transaction ids in tx pool as a json array of string transaction ids Request + ``` { "id": 42, @@ -2526,8 +2687,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2550,6 +2713,7 @@ Response } ``` + #### Method `tx_pool_ready` * `tx_pool_ready()` * result: `boolean` @@ -2560,6 +2724,7 @@ Returns whether tx-pool service is started, ready for request. Request + ``` { "id": 42, @@ -2569,8 +2734,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2579,6 +2746,7 @@ Response } ``` + ### Module Stats RPC Module Stats for getting various statistic data. @@ -2593,6 +2761,7 @@ Returns statistics about the chain. Request + ``` { "id": 42, @@ -2602,8 +2771,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2626,6 +2797,7 @@ Response } ``` + ### Module Subscription RPC Module Subscription that CKB node will push new messages to subscribers. @@ -2636,6 +2808,7 @@ RPC subscriptions require a full duplex connection. 
CKB offers such connections TCP RPC subscription: + ``` telnet localhost 18114 > {"id": 2, "jsonrpc": "2.0", "method": "subscribe", "params": ["new_tip_header"]} @@ -2649,8 +2822,10 @@ telnet localhost 18114 < {"jsonrpc":"2.0","result":true,"id":2} ``` + WebSocket RPC subscription: + ``` let socket = new WebSocket("ws://localhost:28114") @@ -2663,6 +2838,7 @@ socket.send(`{"id": 2, "jsonrpc": "2.0", "method": "subscribe", "params": ["new_ socket.send(`{"id": 2, "jsonrpc": "2.0", "method": "unsubscribe", "params": [0]}`) ``` + #### Method `subscribe` * `subscribe(topic)` * `topic`: `string` @@ -2680,6 +2856,7 @@ This RPC returns the subscription ID as the result. CKB node will push messages Example push message: + ``` { "jsonrpc": "2.0", @@ -2691,17 +2868,18 @@ Example push message: } ``` + ##### Topics ###### `new_tip_header` -Whenever there's a block that is appended to the canonical chain, the CKB node will publish the block header to subscribers. +Whenever there’s a block that is appended to the canonical chain, the CKB node will publish the block header to subscribers. The type of the `params.result` in the push message is [`HeaderView`](#type-headerview). ###### `new_tip_block` -Whenever there's a block that is appended to the canonical chain, the CKB node will publish the whole block to subscribers. +Whenever there’s a block that is appended to the canonical chain, the CKB node will publish the whole block to subscribers. The type of the `params.result` in the push message is [`BlockView`](#type-blockview). @@ -2733,6 +2911,7 @@ The type of the `params.result` in the push message is a two-elements array, whe Request + ``` { "id": 42, @@ -2744,8 +2923,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2754,6 +2935,7 @@ Response } ``` + #### Method `unsubscribe` * `unsubscribe(id)` * `id`: `string` @@ -2769,6 +2951,7 @@ Unsubscribes from a subscribed topic. 
Request + ``` { "id": 42, @@ -2780,8 +2963,10 @@ Request } ``` + Response + ``` { "id": 42, @@ -2791,6 +2976,7 @@ Response ``` + ## RPC Errors CKB RPC error codes. @@ -2904,10 +3090,12 @@ This is a fatal error usually caused by the underlying database used by CKB. Ple The fee rate is calculated as: + ``` fee / (1000 * tx_serialization_size_in_block_in_bytes) ``` + ### Error `PoolRejectedTransactionByMaxAncestorsCountLimit` (-1105): The in-pool ancestors count must be less than or equal to the config option `tx_pool.max_ancestors_count` @@ -3021,7 +3209,7 @@ A banned P2P address. * `address`: `string` - The P2P address. - Example: "/ip4/192.168.0.2/tcp/8112/p2p/QmTRHCdrRtgUzYLNCin69zEvPvLYdxUZLLfLYyHVY3DZAS" + Example: “/ip4/192.168.0.2/tcp/8112/p2p/QmTRHCdrRtgUzYLNCin69zEvPvLYdxUZLLfLYyHVY3DZAS” * `ban_until`: [`Timestamp`](#type-timestamp) - The address is banned until this time. @@ -3165,7 +3353,7 @@ Miners optional pick transactions and then assemble the final block. * `extension`: [`JsonBytes`](#type-jsonbytes) `|` `null` - The extension for the new block. - This field is optional. It's a reserved field, please leave it blank. More details can be found in [CKB RFC 0031](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0031-variable-length-header-field/0031-variable-length-header-field.md). + This field is optional. It’s a reserved field, please leave it blank. More details can be found in [CKB RFC 0031](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0031-variable-length-header-field/0031-variable-length-header-field.md). ### Type `BlockView` @@ -3191,11 +3379,18 @@ Fixed-length 32 bytes binary encoded as a 0x-prefixed hex string in JSON. #### Example + ``` 0xd495a106684401001e47c0ae1d5930009449d26e32380000000721efd0030000 ``` +#### Fields + +`Byte32` is a JSON object with the following fields. + +* `0`: https://doc.rust-lang.org/1.56.1/std/primitive.array.html - Creates Bytes from the array. 
+ ### Type `Capacity` @@ -3352,7 +3547,7 @@ The fields of an output cell except the cell data. * `type_`: [`Script`](#type-script) `|` `null` - The optional type script. - The JSON field name is "type". + The JSON field name is “type”. ### Type `CellWithStatus` @@ -3403,7 +3598,7 @@ The JSON view of a cell with its status information. * `status`: `string` - Status of the cell. - Allowed values: "live", "dead", "unknown". + Allowed values: “live”, “dead”, “unknown”. * `live` - The transaction creating this cell is in the chain, and there are no transactions found in the chain that uses this cell as an input. @@ -3441,9 +3636,9 @@ Chain information. Examples: - * "ckb" - Lina the mainnet. + * “ckb” - Lina the mainnet. - * "ckb_testnet" - Aggron the testnet. + * “ckb_testnet” - Aggron the testnet. * `median_time`: [`Timestamp`](#type-timestamp) - The median time of the last 37 blocks, including the tip block. @@ -3506,7 +3701,7 @@ Consensus defines various parameters that influence chain consensus * `tx_version`: [`Version`](#type-version) - The tx version number supported -* `type_id_code_hash`: [`H256`](#type-h256) - The "TYPE_ID" in hex +* `type_id_code_hash`: [`H256`](#type-h256) - The “TYPE_ID” in hex * `max_block_proposals_limit`: [`Uint64`](#type-uint64) - The Limit to the number of proposals per block @@ -3530,20 +3725,20 @@ An enum to represent the two kinds of dao withdrawal amount calculation option. `DaoWithdrawingCalculationKind` is equivalent to `"withdrawing_header_hash" | "withdrawing_out_point"`. * the assumed reference block hash for withdrawing phase 1 transaction -* the out point of the withdrawing phase 1 transaction +* Returns a copy of the value. [Read more](#method-clone) ### Type `DepType` -The dep cell type. Allowed values: "code" and "dep_group". +The dep cell type. Allowed values: “code” and “dep_group”. `DepType` is equivalent to `"code" | "dep_group"`. -* Type "code". +* Type “code”. Use the cell itself as the dep cell. 
-* Type "dep_group". +* Type “dep_group”. The cell is a dep group which members are cells. These members are used as dep cells instead of the group itself. @@ -3582,12 +3777,14 @@ The lower 56 bits of the epoch field are split into 3 parts (listed in the order * The lowest 24 bits represent the current epoch number. -Assume there's a block, which number is 11555 and in epoch 50. The epoch 50 starts from block 11000 and have 1000 blocks. The epoch field for this particular block will then be 1,099,520,939,130,930, which is calculated in the following way: +Assume there’s a block, which number is 11555 and in epoch 50. The epoch 50 starts from block 11000 and have 1000 blocks. The epoch field for this particular block will then be 1,099,520,939,130,930, which is calculated in the following way: + ``` 50 | ((11555 - 11000) << 24) | (1000 << 40) ``` + ### Type `EpochView` JSON view of an epoch. @@ -3630,6 +3827,11 @@ The name comes from the number of bits in the data. In JSONRPC, it is encoded as a 0x-prefixed hex string. +#### Fields + +`H256` is a JSON object with the following fields. + +* `0`: https://doc.rust-lang.org/1.56.1/std/primitive.array.html - Converts `Self` to a byte slice. ### Type `HardForkFeature` @@ -3748,11 +3950,11 @@ Variable-length binary encoded as a 0x-prefixed hex string in JSON. | JSON | Binary | | --- |--- | -| "0x" | Empty binary | -| "0x00" | Single byte 0 | -| "0x636b62" | 3 bytes, UTF-8 encoding of ckb | -| "00" | Invalid, 0x is required | -| "0x0" | Invalid, each byte requires 2 digits | +| “0x” | Empty binary | +| “0x00” | Single byte 0 | +| “0x636b62” | 3 bytes, UTF-8 encoding of ckb | +| “00” | Invalid, 0x is required | +| “0x0” | Invalid, each byte requires 2 digits | @@ -3805,7 +4007,7 @@ The information of the node itself. * `version`: `string` - CKB node version. 
- Example: "version": "0.34.0 (f37f598 2020-07-17)" + Example: “version”: “0.34.0 (f37f598 2020-07-17)” * `node_id`: `string` - The unique node ID derived from the p2p private key. @@ -3891,7 +4093,7 @@ Node P2P address and score. This is the same address used in the whitelist in ckb.toml. - Example: "/ip4/192.168.0.2/tcp/8112/p2p/QmTRHCdrRtgUzYLNCin69zEvPvLYdxUZLLfLYyHVY3DZAS" + Example: “/ip4/192.168.0.2/tcp/8112/p2p/QmTRHCdrRtgUzYLNCin69zEvPvLYdxUZLLfLYyHVY3DZAS” * `score`: [`Uint64`](#type-uint64) - Address score. @@ -3928,8 +4130,8 @@ Transaction output validators that prevent common mistakes. `OutputsValidator` is equivalent to `"passthrough" | "well_known_scripts_only"`. -* "passthrough": the default validator, bypass output checking, thus allow any kind of transaction outputs. -* "well_known_scripts_only": restricts the lock script and type script usage, see more information on https://github.com/nervosnetwork/ckb/wiki/Transaction-%C2%BB-Default-Outputs-Validator +* “passthrough”: the default validator, bypass output checking, thus allow any kind of transaction outputs. +* “well_known_scripts_only”: restricts the lock script and type script usage, see more information on [https://github.com/nervosnetwork/ckb/wiki/Transaction-%C2%BB-Default-Outputs-Validator](https://github.com/nervosnetwork/ckb/wiki/Transaction-%C2%BB-Default-Outputs-Validator) ### Type `PeerSyncState` @@ -3942,7 +4144,7 @@ The chain synchronization state between the local node and a remote node. * `best_known_header_hash`: [`Byte32`](#type-byte32) `|` `null` - Best known header hash of remote peer. - This is the observed tip of the remote node's canonical chain. + This is the observed tip of the remote node’s canonical chain. 
* `best_known_header_number`: [`Uint64`](#type-uint64) `|` `null` - Best known header number of remote peer @@ -3996,13 +4198,13 @@ TX reject message Different reject types: * `LowFeeRate`: Transaction fee lower than config -* `ExceededMaximumAncestorsCount`: Transaction exceeded maximum ancestors count limit -* `Full`: Transaction pool exceeded maximum size or cycles limit, -* `Duplicated`: Transaction already exist in transaction_pool -* `Malformed`: Malformed transaction -* `DeclaredWrongCycles`: Declared wrong cycles -* `Resolve`: Resolve failed -* `Verification`: Verification failed +* `ExceededMaximumAncestorsCount`: Transaction exceeded maximum ancestors count limit +* `Full`: Transaction pool exceeded maximum size or cycles limit, +* `Duplicated`: Transaction already exist in transaction_pool +* `Malformed`: Malformed transaction +* `DeclaredWrongCycles`: Declared wrong cycles +* `Resolve`: Resolve failed +* `Verification`: Verification failed ### Type `ProposalShortId` @@ -4011,15 +4213,15 @@ The 10-byte fixed-length binary encoded as a 0x-prefixed hex string in JSON. #### Example + ``` 0xa0ef4eb5f4ceeb08a4c8 ``` - ### Type `ProposalWindow` -Two protocol parameters `closest` and `farthest` define the closest and farthest on-chain distance between a transaction's proposal and commitment. +Two protocol parameters `closest` and `farthest` define the closest and farthest on-chain distance between a transaction’s proposal and commitment. A non-cellbase transaction is committed at height h_c if all of the following conditions are met: @@ -4027,6 +4229,7 @@ A non-cellbase transaction is committed at height h_c if all of the following co * it is in the commitment zone of the main chain block with height h_c ; + ``` ProposalWindow { closest: 2, farthest: 10 } propose @@ -4038,6 +4241,7 @@ A non-cellbase transaction is committed at height h_c if all of the following co commit ``` + #### Fields `ProposalWindow` is a JSON object with the following fields. 
@@ -4213,15 +4417,15 @@ Describes the lock script and type script for a cell. Specifies how the script `code_hash` is used to match the script code and how to run the code. -Allowed kinds: "data", "type" and "data1". +Allowed kinds: “data”, “type” and “data1”. Refer to the section [Code Locating](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0022-transaction-structure/0022-transaction-structure.md#code-locating) and [Upgradable Script](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0022-transaction-structure/0022-transaction-structure.md#upgradable-script) in the RFC *CKB Transaction Structure*. `ScriptHashType` is equivalent to `"data" | "type" | "data1"`. -* Type "data" matches script code via cell data hash, and run the script code in v0 CKB VM. -* Type "type" matches script code via cell type script hash. -* Type "data" matches script code via cell data hash, and run the script code in v1 CKB VM. +* Type “data” matches script code via cell data hash, and run the script code in v0 CKB VM. +* Type “type” matches script code via cell type script hash. +* Type “data1” matches script code via cell data hash, and run the script code in v1 CKB VM. ### Type `SerializedBlock` @@ -4238,11 +4442,11 @@ Status for transaction `Status` is equivalent to `"pending" | "proposed" | "committed" | "unknown" | "rejected"`. -* Status "pending". The transaction is in the pool, and not proposed yet. -* Status "proposed". The transaction is in the pool and has been proposed. -* Status "committed". The transaction has been committed to the canonical chain. -* Status "unknown". The node has not seen the transaction, or it should be rejected but was cleared due to storage limitations. -* Status "rejected". The transaction has been recently removed from the pool. Due to storage limitations, the node can only hold the most recently removed transactions. +* Status “pending”. The transaction is in the pool, and not proposed yet. +* Status “proposed”. 
The transaction is in the pool and has been proposed. +* Status “committed”. The transaction has been committed to the canonical chain. +* Status “unknown”. The node has not seen the transaction, or it should be rejected but was cleared due to storage limitations. +* Status “rejected”. The transaction has been recently removed from the pool. Due to storage limitations, the node can only hold the most recently removed transactions. ### Type `SyncState` @@ -4275,11 +4479,11 @@ The overall chain synchronization state of this local node. * `inflight_blocks_count`: [`Uint64`](#type-uint64) - Count of downloading blocks. -* `fast_time`: [`Uint64`](#type-uint64) - The download scheduler's time analysis data, the fast is the 1/3 of the cut-off point, unit ms +* `fast_time`: [`Uint64`](#type-uint64) - The download scheduler’s time analysis data, the fast is the 1/3 of the cut-off point, unit ms -* `normal_time`: [`Uint64`](#type-uint64) - The download scheduler's time analysis data, the normal is the 4/5 of the cut-off point, unit ms +* `normal_time`: [`Uint64`](#type-uint64) - The download scheduler’s time analysis data, the normal is the 4/5 of the cut-off point, unit ms -* `low_time`: [`Uint64`](#type-uint64) - The download scheduler's time analysis data, the low is the 9/10 of the cut-off point, unit ms +* `low_time`: [`Uint64`](#type-uint64) - The download scheduler’s time analysis data, the low is the 9/10 of the cut-off point, unit ms ### Type `Timestamp` @@ -4341,9 +4545,9 @@ Merkle proof for transactions in a block. 
* `block_hash`: [`H256`](#type-h256) - Block hash -* `witnesses_root`: [`H256`](#type-h256) - Merkle root of all transactions' witness hash +* `witnesses_root`: [`H256`](#type-h256) - Merkle root of all transactions’ witness hash -* `proof`: [`MerkleProof`](#type-merkleproof) - Merkle proof of all transactions' hash +* `proof`: [`MerkleProof`](#type-merkleproof) - Merkle proof of all transactions’ hash ### Type `TransactionTemplate` @@ -4546,7 +4750,7 @@ Transaction status and the block hash if it is committed. `TxStatus` is a JSON object with the following fields. -* `status`: [`Status`](#type-status) - The transaction status, allowed values: "pending", "proposed" "committed" "unknown" and "rejected". +* `status`: [`Status`](#type-status) - The transaction status, allowed values: “pending”, “proposed” “committed” “unknown” and “rejected”. * `block_hash`: [`H256`](#type-h256) `|` `null` - The block hash of the block which has committed this transaction in the canonical chain. @@ -4565,10 +4769,10 @@ The 128-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON | JSON | Decimal Value | | --- |--- | -| "0x0" | 0 | -| "0x10" | 16 | -| "10" | Invalid, 0x is required | -| "0x01" | Invalid, redundant leading 0 | +| “0x0” | 0 | +| “0x10” | 16 | +| “10” | Invalid, 0x is required | +| “0x01” | Invalid, redundant leading 0 | ### Type `Uint32` @@ -4578,10 +4782,10 @@ The 32-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. | JSON | Decimal Value | | --- |--- | -| "0x0" | 0 | -| "0x10" | 16 | -| "10" | Invalid, 0x is required | -| "0x01" | Invalid, redundant leading 0 | +| “0x0” | 0 | +| “0x10” | 16 | +| “10” | Invalid, 0x is required | +| “0x01” | Invalid, redundant leading 0 | ### Type `Uint64` @@ -4591,10 +4795,10 @@ The 64-bit unsigned integer type encoded as the 0x-prefixed hex string in JSON. 
| JSON | Decimal Value | | --- |--- | -| "0x0" | 0 | -| "0x10" | 16 | -| "10" | Invalid, 0x is required | -| "0x01" | Invalid, redundant leading 0 | +| “0x0” | 0 | +| “0x10” | 16 | +| “10” | Invalid, 0x is required | +| “0x01” | Invalid, redundant leading 0 | ### Type `UncleBlock` @@ -4608,7 +4812,7 @@ A block B1 is considered to be the uncle of another block B2 if all the followin * B2 block number is larger than B1; -* B1's parent is either B2's ancestor or an uncle embedded in B2 or any of B2's ancestors. +* B1’s parent is either B2’s ancestor or an uncle embedded in B2 or any of B2’s ancestors. * B2 is the first block in its chain to refer to B1. @@ -4633,7 +4837,7 @@ A block B1 is considered to be the uncle of another block B2 if all the followin * B2 block number is larger than B1; -* B1's parent is either B2's ancestor or an uncle embedded in B2 or any of B2's ancestors. +* B1’s parent is either B2’s ancestor or an uncle embedded in B2 or any of B2’s ancestors. * B2 is the first block in its chain to refer to B1. 
diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index 62ef2c3869..153617877d 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -1601,7 +1601,7 @@ impl ChainRpc for ChainRpcImpl { .collect(), ); - CBMT::retrieve_leaves(&block.tx_hashes(), &merkle_proof) + CBMT::retrieve_leaves(block.tx_hashes(), &merkle_proof) .and_then(|tx_hashes| { merkle_proof .root(&tx_hashes) diff --git a/rpc/src/module/experiment.rs b/rpc/src/module/experiment.rs index 8365486319..5effb2242f 100644 --- a/rpc/src/module/experiment.rs +++ b/rpc/src/module/experiment.rs @@ -285,7 +285,7 @@ impl<'a> DryRunner<'a> { let snapshot: &Snapshot = &self.shared.snapshot(); let consensus = snapshot.consensus(); let tip_header = snapshot.tip_header(); - let tx_env = TxVerifyEnv::new_submit(&tip_header); + let tx_env = TxVerifyEnv::new_submit(tip_header); let resolve_opts = { let proposal_window = consensus.tx_proposal_window(); let epoch_number = tx_env.epoch_number(proposal_window); diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index 5b34898b0f..4f306d4b1c 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -236,15 +236,9 @@ impl MinerRpc for MinerRpcImpl { proposals_limit: Option, max_version: Option, ) -> Result { - let bytes_limit = match bytes_limit { - Some(b) => Some(b.into()), - None => None, - }; + let bytes_limit = bytes_limit.map(|b| b.into()); - let proposals_limit = match proposals_limit { - Some(b) => Some(b.into()), - None => None, - }; + let proposals_limit = proposals_limit.map(|b| b.into()); self.shared .get_block_template(bytes_limit, proposals_limit, max_version.map(Into::into)) diff --git a/rpc/src/module/net.rs b/rpc/src/module/net.rs index 2a45b2b933..982e200ae0 100644 --- a/rpc/src/module/net.rs +++ b/rpc/src/module/net.rs @@ -606,7 +606,7 @@ impl NetRpc for NetRpcImpl { last_ping_duration: peer .ping_rtt .map(|duration| (duration.as_millis() as u64).into()), - sync_state: 
self.sync_shared.state().peers().state.get(&peer_index).map( + sync_state: self.sync_shared.state().peers().state.get(peer_index).map( |state| PeerSyncState { best_known_header_hash: state .best_known_header diff --git a/rpc/src/module/pool.rs b/rpc/src/module/pool.rs index 7e1d7210fd..c445ec34c5 100644 --- a/rpc/src/module/pool.rs +++ b/rpc/src/module/pool.rs @@ -9,7 +9,6 @@ use ckb_types::{core, packed, prelude::*, H256}; use ckb_verification::{Since, SinceMetric, TxVerifyEnv}; use jsonrpc_core::Result; use jsonrpc_derive::rpc; -use std::convert::TryInto; use std::sync::Arc; /// RPC Module Pool for transaction memory pool. @@ -425,7 +424,7 @@ impl PoolRpc for PoolRpcImpl { let consensus = snapshot.consensus(); let tx_env = { let tip_header = snapshot.tip_header(); - TxVerifyEnv::new_submit(&tip_header) + TxVerifyEnv::new_submit(tip_header) }; if let Err(e) = IllTransactionChecker::new(&tx, consensus, &tx_env).check() { return Err(RPCError::custom_with_data( diff --git a/rpc/src/tests/examples.rs b/rpc/src/tests/examples.rs index 63304d7bd6..6a41bb2e9c 100644 --- a/rpc/src/tests/examples.rs +++ b/rpc/src/tests/examples.rs @@ -206,7 +206,7 @@ fn setup_rpc_test_suite(height: u64) -> RpcTestSuite { // Start rpc services let rpc_config = RpcConfig { - listen_address: "127.0.0.01:0".to_owned(), + listen_address: "127.0.0.1:0".to_owned(), tcp_listen_address: None, ws_listen_address: None, max_request_body_size: 20_000_000, @@ -505,7 +505,7 @@ impl RpcTestSuite { fn run_example(&self, example: &RpcTestExample) { let mut actual = self.rpc(&example.request); - mock_rpc_response(&example, &mut actual); + mock_rpc_response(example, &mut actual); pretty_assert_eq!( example.response, actual, diff --git a/rust-toolchain b/rust-toolchain index ba0a719118..43c989b553 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -1.51.0 +1.56.1 diff --git a/script/Cargo.toml b/script/Cargo.toml index 47b4feeb3e..c513167110 100644 --- a/script/Cargo.toml +++ b/script/Cargo.toml @@ 
-3,7 +3,7 @@ name = "ckb-script" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" build = "build.rs" description = "CKB component to run the type/lock scripts." homepage = "https://github.com/nervosnetwork/ckb" diff --git a/script/fuzz/Cargo.toml b/script/fuzz/Cargo.toml index e1c7849177..8473d9e205 100644 --- a/script/fuzz/Cargo.toml +++ b/script/fuzz/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-script-fuzz" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "ckb-script crate fuzz test" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/script/src/error.rs b/script/src/error.rs index 6840c58838..76d0ee840f 100644 --- a/script/src/error.rs +++ b/script/src/error.rs @@ -2,7 +2,6 @@ use crate::types::{ScriptGroup, ScriptGroupType}; use ckb_error::{prelude::*, Error, ErrorKind}; use ckb_types::core::{Cycle, ScriptHashType}; use ckb_types::packed::Script; -use std::convert::TryFrom; use std::{error::Error as StdError, fmt}; /// Script execution error. 
diff --git a/script/src/syscalls/load_header.rs b/script/src/syscalls/load_header.rs index 04686fa7b6..8bc9531aa1 100644 --- a/script/src/syscalls/load_header.rs +++ b/script/src/syscalls/load_header.rs @@ -56,7 +56,7 @@ impl<'a, DL: HeaderProvider + 'a> LoadHeader<'a, DL> { .into_iter() .any(|hash| &hash == block_hash) { - self.data_loader.get_header(&block_hash) + self.data_loader.get_header(block_hash) } else { None } diff --git a/script/src/syscalls/load_script.rs b/script/src/syscalls/load_script.rs index a93e5c6876..6fa9f1f8d9 100644 --- a/script/src/syscalls/load_script.rs +++ b/script/src/syscalls/load_script.rs @@ -30,7 +30,7 @@ impl Syscalls for LoadScript { } let data = self.script.as_slice(); - let wrote_size = store_data(machine, &data)?; + let wrote_size = store_data(machine, data)?; machine.add_cycles_no_checking(transferred_byte_cycles(wrote_size))?; machine.set_register(A0, Mac::REG::from_u8(SUCCESS)); diff --git a/script/src/syscalls/tests/vm_latest/syscalls_2.rs b/script/src/syscalls/tests/vm_latest/syscalls_2.rs index 083fa29151..feb65a3dbc 100644 --- a/script/src/syscalls/tests/vm_latest/syscalls_2.rs +++ b/script/src/syscalls/tests/vm_latest/syscalls_2.rs @@ -21,7 +21,7 @@ fn test_vm_version() { let result = VMVersion::new().ecall(&mut machine); - assert_eq!(result.unwrap(), true); + assert!(result.unwrap()); assert_eq!(machine.registers()[A0], vm_version); } @@ -42,6 +42,6 @@ fn test_current_cycles() { let result = CurrentCycles::new().ecall(&mut machine); - assert_eq!(result.unwrap(), true); + assert!(result.unwrap()); assert_eq!(machine.registers()[A0], cycles); } diff --git a/script/src/types.rs b/script/src/types.rs index 8084806d11..439a1a34f0 100644 --- a/script/src/types.rs +++ b/script/src/types.rs @@ -11,7 +11,6 @@ use ckb_vm::{ CoreMachine as _, Memory, SupportMachine, ISA_B, ISA_IMC, ISA_MOP, RISCV_PAGESIZE, }; use serde::{Deserialize, Serialize}; -use std::convert::TryFrom; use std::fmt; #[cfg(has_asm)] diff --git 
a/script/src/verify.rs b/script/src/verify.rs index 0663eb6688..34d1a4df2c 100644 --- a/script/src/verify.rs +++ b/script/src/verify.rs @@ -43,7 +43,6 @@ use ckb_vm::TraceMachine; use std::cell::RefCell; use std::collections::{BTreeMap, HashMap}; -use std::convert::TryFrom; #[cfg(test)] mod tests; @@ -191,7 +190,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D let data_hash = data_loader .load_cell_data_hash(cell_meta) .expect("cell data hash"); - let lazy = LazyData::from_cell_meta(&cell_meta); + let lazy = LazyData::from_cell_meta(cell_meta); binaries_by_data_hash.insert(data_hash.to_owned(), lazy.to_owned()); if let Some(t) = &cell_meta.cell_output.type_().to_opt() { @@ -215,7 +214,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D if let Some(t) = &output.type_().to_opt() { let type_group_entry = type_groups .entry(t.calc_script_hash()) - .or_insert_with(|| ScriptGroup::from_type_script(&t)); + .or_insert_with(|| ScriptGroup::from_type_script(t)); type_group_entry.input_indices.push(i); } } @@ -223,7 +222,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D if let Some(t) = &output.type_().to_opt() { let type_group_entry = type_groups .entry(t.calc_script_hash()) - .or_insert_with(|| ScriptGroup::from_type_script(&t)); + .or_insert_with(|| ScriptGroup::from_type_script(t)); type_group_entry.output_indices.push(i); } } @@ -310,7 +309,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D fn build_exec(&'a self, group_inputs: &'a [usize], group_outputs: &'a [usize]) -> Exec<'a, DL> { Exec::new( - &self.data_loader, + self.data_loader, &self.outputs, self.resolved_inputs(), self.resolved_cell_deps(), @@ -330,7 +329,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D group_outputs: &'a [usize], ) -> LoadCell<'a, DL> { LoadCell::new( - &self.data_loader, + self.data_loader, &self.outputs, 
self.resolved_inputs(), self.resolved_cell_deps(), @@ -345,7 +344,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D group_outputs: &'a [usize], ) -> LoadCellData<'a, DL> { LoadCellData::new( - &self.data_loader, + self.data_loader, &self.outputs, self.resolved_inputs(), self.resolved_cell_deps(), @@ -365,7 +364,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D fn build_load_header(&'a self, group_inputs: &'a [usize]) -> LoadHeader<'a, DL> { LoadHeader::new( - &self.data_loader, + self.data_loader, self.header_deps(), self.resolved_inputs(), self.resolved_cell_deps(), @@ -486,7 +485,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D e.source(group) })?; - cycles = wrapping_cycles_add(cycles, used_cycles, &group)?; + cycles = wrapping_cycles_add(cycles, used_cycles, group)?; } Ok(cycles) } @@ -533,7 +532,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D .source(group) })?; - match self.verify_group_with_chunk(&group, remain_cycles, &None) { + match self.verify_group_with_chunk(group, remain_cycles, &None) { Ok(ChunkState::Completed(used_cycles)) => { cycles = wrapping_cycles_add(cycles, used_cycles, group)?; } @@ -583,14 +582,14 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D })?; // continue snapshot current script - match self.verify_group_with_chunk(¤t_group, limit_cycles, &snap.snap) { + match self.verify_group_with_chunk(current_group, limit_cycles, &snap.snap) { Ok(ChunkState::Completed(used_cycles)) => { current_used = wrapping_cycles_add( current_used, - wrapping_cycles_sub(used_cycles, current_group_used, ¤t_group)?, - ¤t_group, + wrapping_cycles_sub(used_cycles, current_group_used, current_group)?, + current_group, )?; - cycles = wrapping_cycles_add(cycles, used_cycles, ¤t_group)?; + cycles = wrapping_cycles_add(cycles, used_cycles, current_group)?; } Ok(ChunkState::Suspended(vm)) 
=> { let current = snap.current; @@ -598,7 +597,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D return Ok(VerifyResult::Suspended(state)); } Err(e) => { - return Err(e.source(¤t_group).into()); + return Err(e.source(current_group).into()); } } @@ -614,8 +613,8 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D match self.verify_group_with_chunk(group, remain_cycles, &None) { Ok(ChunkState::Completed(used_cycles)) => { - current_used = wrapping_cycles_add(current_used, used_cycles, &group)?; - cycles = wrapping_cycles_add(cycles, used_cycles, &group)?; + current_used = wrapping_cycles_add(current_used, used_cycles, group)?; + cycles = wrapping_cycles_add(cycles, used_cycles, group)?; } Ok(ChunkState::Suspended(vm)) => { let current = idx; @@ -675,11 +674,11 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D self.tracing_data_as_code_pages.borrow_mut().clear(); if code == 0 { current_used = - wrapping_cycles_add(current_used, vm.cycles(), ¤t_group)?; - cycles = wrapping_cycles_add(cycles, vm.cycles(), ¤t_group)?; + wrapping_cycles_add(current_used, vm.cycles(), current_group)?; + cycles = wrapping_cycles_add(cycles, vm.cycles(), current_group)?; } else { return Err(ScriptError::validation_failure(¤t_group.script, code) - .source(¤t_group) + .source(current_group) .into()); } } @@ -691,7 +690,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D error => { self.tracing_data_as_code_pages.borrow_mut().clear(); return Err(ScriptError::VMInternalError(format!("{:?}", error)) - .source(¤t_group) + .source(current_group) .into()); } }, @@ -699,8 +698,8 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D } else { match self.verify_group_with_chunk(current_group, limit_cycles, &None) { Ok(ChunkState::Completed(used_cycles)) => { - current_used = wrapping_cycles_add(current_used, used_cycles, ¤t_group)?; - 
cycles = wrapping_cycles_add(cycles, used_cycles, ¤t_group)?; + current_used = wrapping_cycles_add(current_used, used_cycles, current_group)?; + cycles = wrapping_cycles_add(cycles, used_cycles, current_group)?; } Ok(ChunkState::Suspended(vm)) => { let state = self.build_state(vm, current, cycles, limit_cycles); @@ -723,8 +722,8 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D match self.verify_group_with_chunk(group, remain_cycles, &None) { Ok(ChunkState::Completed(used_cycles)) => { - current_used = wrapping_cycles_add(current_used, used_cycles, &group)?; - cycles = wrapping_cycles_add(cycles, used_cycles, &group)?; + current_used = wrapping_cycles_add(current_used, used_cycles, group)?; + cycles = wrapping_cycles_add(cycles, used_cycles, group)?; } Ok(ChunkState::Suspended(vm)) => { let current = idx; @@ -772,17 +771,17 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D // continue snapshot current script // max_cycles - cycles checked - match self.verify_group_with_chunk(¤t_group, max_cycles - cycles, &snap.snap) { + match self.verify_group_with_chunk(current_group, max_cycles - cycles, &snap.snap) { Ok(ChunkState::Completed(used_cycles)) => { - cycles = wrapping_cycles_add(cycles, used_cycles, ¤t_group)?; + cycles = wrapping_cycles_add(cycles, used_cycles, current_group)?; } Ok(ChunkState::Suspended(_)) => { return Err(ScriptError::ExceededMaximumCycles(max_cycles) - .source(¤t_group) + .source(current_group) .into()); } Err(e) => { - return Err(e.source(¤t_group).into()); + return Err(e.source(current_group).into()); } } @@ -797,7 +796,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D match self.verify_group_with_chunk(group, remain_cycles, &None) { Ok(ChunkState::Completed(used_cycles)) => { - cycles = wrapping_cycles_add(cycles, used_cycles, ¤t_group)?; + cycles = wrapping_cycles_add(cycles, used_cycles, current_group)?; } Ok(ChunkState::Suspended(_)) => 
{ return Err(ScriptError::ExceededMaximumCycles(max_cycles) @@ -842,7 +841,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider> TransactionScriptsVerifier<'a, D }; verifier.verify() } else { - self.run(&group, max_cycles) + self.run(group, max_cycles) } } /// Returns all script groups. diff --git a/script/src/verify/tests/ckb_latest/features_since_v2021.rs b/script/src/verify/tests/ckb_latest/features_since_v2021.rs index b560070337..8beaacb478 100644 --- a/script/src/verify/tests/ckb_latest/features_since_v2021.rs +++ b/script/src/verify/tests/ckb_latest/features_since_v2021.rs @@ -7,7 +7,6 @@ use ckb_types::{ packed::{self, CellDep, CellInput, CellOutputBuilder, OutPoint, Script}, }; use ckb_vm::Error as VmError; -use std::convert::TryInto; use super::SCRIPT_VERSION; use crate::{ @@ -534,7 +533,7 @@ fn check_type_id_one_in_one_out_resume() { while let Some((_ty, _, group)) = groups.front().cloned() { match verifier - .verify_group_with_chunk(&group, step_cycles, &None) + .verify_group_with_chunk(group, step_cycles, &None) .unwrap() { ChunkState::Completed(used_cycles) => { @@ -650,10 +649,7 @@ fn check_type_id_one_in_one_out_chunk() { ScriptGroupType::Lock => ALWAYS_SUCCESS_SCRIPT_CYCLE - 10, ScriptGroupType::Type => TYPE_ID_CYCLES, }; - match verifier - .verify_group_with_chunk(&group, max, &None) - .unwrap() - { + match verifier.verify_group_with_chunk(group, max, &None).unwrap() { ChunkState::Completed(used_cycles) => { cycles += used_cycles; } @@ -708,7 +704,7 @@ fn check_typical_secp256k1_blake160_2_in_2_out_tx_with_chunk() { } while let Some((_, _, group)) = groups.pop() { match verifier - .verify_group_with_chunk(&group, TWO_IN_TWO_OUT_CYCLES / 10, &None) + .verify_group_with_chunk(group, TWO_IN_TWO_OUT_CYCLES / 10, &None) .unwrap() { ChunkState::Completed(used_cycles) => { diff --git a/shared/Cargo.toml b/shared/Cargo.toml index 01aeaac414..44a25c0170 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -2,7 +2,7 @@ name = "ckb-shared" version = 
"0.102.0-pre" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" license = "MIT" description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" diff --git a/spec/Cargo.toml b/spec/Cargo.toml index 28bfc1fcb8..e56c42323f 100644 --- a/spec/Cargo.toml +++ b/spec/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-chain-spec" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "The CKB block chain specification" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/spec/src/lib.rs b/spec/src/lib.rs index 81ba45c916..d6e53e2329 100644 --- a/spec/src/lib.rs +++ b/spec/src/lib.rs @@ -36,7 +36,6 @@ use ckb_types::{ }; use serde::{Deserialize, Serialize}; use std::collections::HashMap; -use std::convert::TryFrom; use std::error::Error; use std::fmt; use std::sync::Arc; @@ -930,7 +929,7 @@ pub fn build_genesis_type_id_script(output_index: u64) -> packed::Script { pub(crate) fn build_type_id_script(input: &packed::CellInput, output_index: u64) -> packed::Script { let mut blake2b = new_blake2b(); - blake2b.update(&input.as_slice()); + blake2b.update(input.as_slice()); blake2b.update(&output_index.to_le_bytes()); let mut ret = [0; 32]; blake2b.finalize(&mut ret); diff --git a/spec/src/tests/mod.rs b/spec/src/tests/mod.rs index 7c2fa83b3b..83c69dcd24 100644 --- a/spec/src/tests/mod.rs +++ b/spec/src/tests/mod.rs @@ -172,7 +172,7 @@ fn test_default_params() { genesis_epoch_length = 100 "#; - let params: Params = toml::from_str(&test_params).unwrap(); + let params: Params = toml::from_str(test_params).unwrap(); let expected = Params { genesis_epoch_length: Some(100), ..Default::default() @@ -184,7 +184,7 @@ fn test_default_params() { max_block_bytes = 100 "#; - let params: Params = toml::from_str(&test_params).unwrap(); + let params: Params = toml::from_str(test_params).unwrap(); let expected = Params { 
max_block_bytes: Some(100), ..Default::default() @@ -196,7 +196,7 @@ fn test_default_params() { max_block_proposals_limit = 100 "#; - let params: Params = toml::from_str(&test_params).unwrap(); + let params: Params = toml::from_str(test_params).unwrap(); let expected = Params { max_block_proposals_limit: Some(100), ..Default::default() @@ -208,7 +208,7 @@ fn test_default_params() { orphan_rate_target = [1, 40] "#; - let params: Params = toml::from_str(&test_params).unwrap(); + let params: Params = toml::from_str(test_params).unwrap(); let expected = Params { orphan_rate_target: Some((1, 40)), ..Default::default() diff --git a/store/Cargo.toml b/store/Cargo.toml index 2cab9e7a78..5919b5c555 100644 --- a/store/Cargo.toml +++ b/store/Cargo.toml @@ -2,7 +2,7 @@ name = "ckb-store" version = "0.102.0-pre" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" license = "MIT" description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" diff --git a/store/src/cell.rs b/store/src/cell.rs index 597180bec5..c5e669a923 100644 --- a/store/src/cell.rs +++ b/store/src/cell.rs @@ -102,7 +102,7 @@ pub fn detach_block_cell(txn: &StoreTransaction, block: &BlockView) -> Result<() let undo_deads = input_pts .iter() .filter_map(|(tx_hash, indexes)| { - txn.get_transaction_with_info(&tx_hash) + txn.get_transaction_with_info(tx_hash) .map(move |(tx, info)| { let block_hash = info.block_hash; let block_number = info.block_number; diff --git a/store/src/db.rs b/store/src/db.rs index a9d80d35a2..240fb2b78f 100644 --- a/store/src/db.rs +++ b/store/src/db.rs @@ -160,7 +160,7 @@ impl ChainDB { txs_fees: vec![], }; - attach_block_cell(&db_txn, &genesis)?; + attach_block_cell(&db_txn, genesis)?; let last_block_hash_in_previous_epoch = epoch.last_block_hash_in_previous_epoch(); db_txn.insert_block(genesis)?; @@ -168,7 +168,7 @@ impl ChainDB { db_txn.insert_tip_header(&genesis.header())?; db_txn.insert_current_epoch_ext(epoch)?; 
db_txn.insert_block_epoch_index(&genesis_hash, &last_block_hash_in_previous_epoch)?; - db_txn.insert_epoch_ext(&last_block_hash_in_previous_epoch, &epoch)?; + db_txn.insert_epoch_ext(&last_block_hash_in_previous_epoch, epoch)?; db_txn.attach_block(genesis)?; db_txn.commit()?; Ok(()) diff --git a/store/src/store.rs b/store/src/store.rs index 4222158201..ec402dd0df 100644 --- a/store/src/store.rs +++ b/store/src/store.rs @@ -72,7 +72,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { } }; let ret = self.get(COLUMN_BLOCK_HEADER, hash.as_slice()).map(|slice| { - let reader = packed::HeaderViewReader::from_slice_should_be_ok(&slice.as_ref()); + let reader = packed::HeaderViewReader::from_slice_should_be_ok(slice.as_ref()); Unpack::::unpack(&reader) }); @@ -95,7 +95,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { ) .take_while(|(key, _)| key.starts_with(prefix)) .map(|(_key, value)| { - let reader = packed::TransactionViewReader::from_slice_should_be_ok(&value.as_ref()); + let reader = packed::TransactionViewReader::from_slice_should_be_ok(value.as_ref()); Unpack::::unpack(&reader) }) .collect() @@ -106,7 +106,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { let header = self .get(COLUMN_BLOCK_HEADER, hash.as_slice()) .map(|slice| { - let reader = packed::HeaderViewReader::from_slice_should_be_ok(&slice.as_ref()); + let reader = packed::HeaderViewReader::from_slice_should_be_ok(slice.as_ref()); Unpack::::unpack(&reader) })?; @@ -116,7 +116,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { .get(COLUMN_BLOCK_UNCLE, hash.as_slice()) .map(|slice| { let reader = - packed::UncleBlockVecViewReader::from_slice_should_be_ok(&slice.as_ref()); + packed::UncleBlockVecViewReader::from_slice_should_be_ok(slice.as_ref()); Unpack::::unpack(&reader) }) .expect("block uncles must be stored"); @@ -124,14 +124,14 @@ pub trait ChainStore<'a>: Send + Sync + Sized { let proposals = self .get(COLUMN_BLOCK_PROPOSAL_IDS, hash.as_slice()) .map(|slice| { - 
packed::ProposalShortIdVecReader::from_slice_should_be_ok(&slice.as_ref()) + packed::ProposalShortIdVecReader::from_slice_should_be_ok(slice.as_ref()) .to_entity() }) .expect("block proposal_ids must be stored"); let extension_opt = self .get(COLUMN_BLOCK_EXTENSION, hash.as_slice()) - .map(|slice| packed::BytesReader::from_slice_should_be_ok(&slice.as_ref()).to_entity()); + .map(|slice| packed::BytesReader::from_slice_should_be_ok(slice.as_ref()).to_entity()); let block = if let Some(extension) = extension_opt { BlockView::new_unchecked_with_extension(header, uncles, body, proposals, extension) @@ -158,8 +158,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { ) .take_while(|(key, _)| key.starts_with(prefix)) .map(|(_key, value)| { - let reader = - packed::TransactionViewReader::from_slice_should_be_ok(&value.as_ref()); + let reader = packed::TransactionViewReader::from_slice_should_be_ok(value.as_ref()); reader.hash().to_entity() }) .collect(); @@ -185,7 +184,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { let ret = self .get(COLUMN_BLOCK_PROPOSAL_IDS, hash.as_slice()) .map(|slice| { - packed::ProposalShortIdVecReader::from_slice_should_be_ok(&slice.as_ref()) + packed::ProposalShortIdVecReader::from_slice_should_be_ok(slice.as_ref()) .to_entity() }); @@ -208,7 +207,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { }; let ret = self.get(COLUMN_BLOCK_UNCLE, hash.as_slice()).map(|slice| { - let reader = packed::UncleBlockVecViewReader::from_slice_should_be_ok(&slice.as_ref()); + let reader = packed::UncleBlockVecViewReader::from_slice_should_be_ok(slice.as_ref()); Unpack::::unpack(&reader) }); @@ -232,7 +231,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { let ret = self .get(COLUMN_BLOCK_EXTENSION, hash.as_slice()) - .map(|slice| packed::BytesReader::from_slice_should_be_ok(&slice.as_ref()).to_entity()); + .map(|slice| packed::BytesReader::from_slice_should_be_ok(slice.as_ref()).to_entity()); if let Some(cache) = self.cache() { 
cache.block_extensions.lock().put(hash.clone(), ret.clone()); @@ -243,20 +242,20 @@ pub trait ChainStore<'a>: Send + Sync + Sized { /// Get block ext by block header hash fn get_block_ext(&'a self, block_hash: &packed::Byte32) -> Option { self.get(COLUMN_BLOCK_EXT, block_hash.as_slice()) - .map(|slice| packed::BlockExtReader::from_slice_should_be_ok(&slice.as_ref()).unpack()) + .map(|slice| packed::BlockExtReader::from_slice_should_be_ok(slice.as_ref()).unpack()) } /// Get block header hash by block number fn get_block_hash(&'a self, number: BlockNumber) -> Option { let block_number: packed::Uint64 = number.pack(); self.get(COLUMN_INDEX, block_number.as_slice()) - .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(&raw.as_ref()).to_entity()) + .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity()) } /// Get block number by block header hash fn get_block_number(&'a self, hash: &packed::Byte32) -> Option { self.get(COLUMN_INDEX, hash.as_slice()) - .map(|raw| packed::Uint64Reader::from_slice_should_be_ok(&raw.as_ref()).unpack()) + .map(|raw| packed::Uint64Reader::from_slice_should_be_ok(raw.as_ref()).unpack()) } /// TODO(doc): @quake @@ -269,7 +268,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { self.get(COLUMN_META, META_TIP_HEADER_KEY) .and_then(|raw| { self.get_block_header( - &packed::Byte32Reader::from_slice_should_be_ok(&raw.as_ref()).to_entity(), + &packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity(), ) }) .map(Into::into) @@ -296,8 +295,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { fn get_transaction_info(&'a self, hash: &packed::Byte32) -> Option { self.get(COLUMN_TRANSACTION_INFO, hash.as_slice()) .map(|slice| { - let reader = - packed::TransactionInfoReader::from_slice_should_be_ok(&slice.as_ref()); + let reader = packed::TransactionInfoReader::from_slice_should_be_ok(slice.as_ref()); Unpack::::unpack(&reader) }) } @@ -321,8 +319,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { } 
self.get(COLUMN_BLOCK_BODY, tx_info.key().as_slice()) .map(|slice| { - let reader = - packed::TransactionViewReader::from_slice_should_be_ok(&slice.as_ref()); + let reader = packed::TransactionViewReader::from_slice_should_be_ok(slice.as_ref()); (reader.unpack(), tx_info) }) } @@ -337,7 +334,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { fn get_cell(&'a self, out_point: &OutPoint) -> Option { let key = out_point.to_cell_key(); self.get(COLUMN_CELL, &key).map(|slice| { - let reader = packed::CellEntryReader::from_slice_should_be_ok(&slice.as_ref()); + let reader = packed::CellEntryReader::from_slice_should_be_ok(slice.as_ref()); build_cell_meta_from_reader(out_point.clone(), reader) }) } @@ -353,7 +350,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { let ret = self.get(COLUMN_CELL_DATA, &key).map(|slice| { if !slice.as_ref().is_empty() { - let reader = packed::CellDataEntryReader::from_slice_should_be_ok(&slice.as_ref()); + let reader = packed::CellDataEntryReader::from_slice_should_be_ok(slice.as_ref()); let data = reader.output_data().unpack(); let data_hash = reader.output_data_hash().to_entity(); (data, data_hash) @@ -392,7 +389,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { let ret = self.get(COLUMN_CELL_DATA_HASH, &key).map(|raw| { if !raw.as_ref().is_empty() { - packed::Byte32Reader::from_slice_should_be_ok(&raw.as_ref()).to_entity() + packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity() } else { // impl packed::CellOutput { // pub fn calc_data_hash(data: &[u8]) -> packed::Byte32 { @@ -420,26 +417,26 @@ pub trait ChainStore<'a>: Send + Sync + Sized { /// Gets current epoch ext fn get_current_epoch_ext(&'a self) -> Option { self.get(COLUMN_META, META_CURRENT_EPOCH_KEY) - .map(|slice| packed::EpochExtReader::from_slice_should_be_ok(&slice.as_ref()).unpack()) + .map(|slice| packed::EpochExtReader::from_slice_should_be_ok(slice.as_ref()).unpack()) } /// Gets epoch ext by epoch index fn get_epoch_ext(&'a self, hash: 
&packed::Byte32) -> Option { self.get(COLUMN_EPOCH, hash.as_slice()) - .map(|slice| packed::EpochExtReader::from_slice_should_be_ok(&slice.as_ref()).unpack()) + .map(|slice| packed::EpochExtReader::from_slice_should_be_ok(slice.as_ref()).unpack()) } /// Gets epoch index by epoch number fn get_epoch_index(&'a self, number: EpochNumber) -> Option { let epoch_number: packed::Uint64 = number.pack(); self.get(COLUMN_EPOCH, epoch_number.as_slice()) - .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(&raw.as_ref()).to_entity()) + .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity()) } /// Gets epoch index by block hash fn get_block_epoch_index(&'a self, block_hash: &packed::Byte32) -> Option { self.get(COLUMN_BLOCK_EPOCH, block_hash.as_slice()) - .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(&raw.as_ref()).to_entity()) + .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity()) } /// TODO(doc): @quake @@ -456,7 +453,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { /// Gets header by uncle header hash fn get_uncle_header(&'a self, hash: &packed::Byte32) -> Option { self.get(COLUMN_UNCLES, hash.as_slice()).map(|slice| { - let reader = packed::HeaderViewReader::from_slice_should_be_ok(&slice.as_ref()); + let reader = packed::HeaderViewReader::from_slice_should_be_ok(slice.as_ref()); Unpack::::unpack(&reader) }) } @@ -477,7 +474,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { .block_hash(hash.to_owned()) .build(); self.get(COLUMN_BLOCK_BODY, key.as_slice()).map(|slice| { - let reader = packed::TransactionViewReader::from_slice_should_be_ok(&slice.as_ref()); + let reader = packed::TransactionViewReader::from_slice_should_be_ok(slice.as_ref()); Unpack::::unpack(&reader) }) } @@ -487,7 +484,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { let header = self .get(COLUMN_BLOCK_HEADER, hash.as_slice()) .map(|slice| { - let reader = 
packed::HeaderViewReader::from_slice_should_be_ok(&slice.as_ref()); + let reader = packed::HeaderViewReader::from_slice_should_be_ok(slice.as_ref()); reader.data().to_entity() })?; @@ -499,8 +496,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { ) .take_while(|(key, _)| key.starts_with(prefix)) .map(|(_key, value)| { - let reader = - packed::TransactionViewReader::from_slice_should_be_ok(&value.as_ref()); + let reader = packed::TransactionViewReader::from_slice_should_be_ok(value.as_ref()); reader.data().to_entity() }) .pack(); @@ -533,7 +529,7 @@ pub trait ChainStore<'a>: Send + Sync + Sized { /// TODO(doc): @quake fn get_packed_block_header(&'a self, hash: &packed::Byte32) -> Option { self.get(COLUMN_BLOCK_HEADER, hash.as_slice()).map(|slice| { - let reader = packed::HeaderViewReader::from_slice_should_be_ok(&slice.as_ref()); + let reader = packed::HeaderViewReader::from_slice_should_be_ok(slice.as_ref()); reader.data().to_entity() }) } diff --git a/store/src/tests/db.rs b/store/src/tests/db.rs index cf56522e42..9c8c7f7ba1 100644 --- a/store/src/tests/db.rs +++ b/store/src/tests/db.rs @@ -17,7 +17,7 @@ fn save_and_get_block() { let hash = block.hash(); let txn = store.begin_transaction(); - txn.insert_block(&block).unwrap(); + txn.insert_block(block).unwrap(); txn.commit().unwrap(); assert_eq!(block, &store.get_block(&hash).unwrap()); } diff --git a/store/src/transaction.rs b/store/src/transaction.rs index 5accd9c5d4..0e90d19296 100644 --- a/store/src/transaction.rs +++ b/store/src/transaction.rs @@ -140,7 +140,7 @@ impl StoreTransaction { self.inner .get_for_update(COLUMN_META, META_TIP_HEADER_KEY, &snapshot.inner) .expect("db operation should be ok") - .map(|slice| packed::Byte32Reader::from_slice_should_be_ok(&slice.as_ref()).to_entity()) + .map(|slice| packed::Byte32Reader::from_slice_should_be_ok(slice.as_ref()).to_entity()) } /// TODO(doc): @quake @@ -161,7 +161,7 @@ impl StoreTransaction { self.insert_raw( COLUMN_BLOCK_EXTENSION, hash.as_slice(), - 
&extension.as_slice(), + extension.as_slice(), )?; } self.insert_raw( @@ -251,8 +251,8 @@ impl StoreTransaction { for uncle in block.uncles().into_iter() { self.insert_raw( COLUMN_UNCLES, - &uncle.hash().as_slice(), - &uncle.header().pack().as_slice(), + uncle.hash().as_slice(), + uncle.header().pack().as_slice(), )?; } self.insert_raw(COLUMN_INDEX, block_hash.as_slice(), block_number.as_slice()) diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 65cb505eb1..4c1afaa52c 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-sync" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "The ckb sync/relayer protocols implementation" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/sync/src/orphan_block_pool.rs b/sync/src/orphan_block_pool.rs index 86eb32e6d1..25015011ba 100644 --- a/sync/src/orphan_block_pool.rs +++ b/sync/src/orphan_block_pool.rs @@ -86,7 +86,7 @@ impl InnerPool { pub fn get_block(&self, hash: &packed::Byte32) -> Option { self.parents.get(hash).and_then(|parent_hash| { self.blocks - .get(&parent_hash) + .get(parent_hash) .and_then(|blocks| blocks.get(hash).cloned()) }) } @@ -98,7 +98,7 @@ impl InnerPool { for hash in self.leaders.clone().iter() { if self.need_clean(hash, tip_epoch) { // remove items in orphan pool and return hash to callee(clean header map) - let descendants = self.remove_blocks_by_parent(&hash); + let descendants = self.remove_blocks_by_parent(hash); result.extend(descendants.iter().map(|block| block.hash())); } } diff --git a/sync/src/relayer/block_transactions_process.rs b/sync/src/relayer/block_transactions_process.rs index 22cedbf602..7c8487c94c 100644 --- a/sync/src/relayer/block_transactions_process.rs +++ b/sync/src/relayer/block_transactions_process.rs @@ -77,13 +77,13 @@ impl<'a> BlockTransactionsProcess<'a> { ); attempt!(BlockTransactionsVerifier::verify( - 
&compact_block, - &expected_transaction_indexes, + compact_block, + expected_transaction_indexes, &received_transactions, )); attempt!(BlockUnclesVerifier::verify( - &compact_block, - &expected_uncle_indexes, + compact_block, + expected_uncle_indexes, &received_uncles, )); @@ -91,7 +91,7 @@ impl<'a> BlockTransactionsProcess<'a> { &active_chain, compact_block, received_transactions, - &expected_uncle_indexes, + expected_uncle_indexes, &received_uncles, ); diff --git a/sync/src/relayer/compact_block_process.rs b/sync/src/relayer/compact_block_process.rs index 6fcc6b1247..f5441c2761 100644 --- a/sync/src/relayer/compact_block_process.rs +++ b/sync/src/relayer/compact_block_process.rs @@ -150,8 +150,7 @@ impl<'a> CompactBlockProcess<'a> { let median_time_context = CompactBlockMedianTimeView { fn_get_pending_header: Box::new(fn_get_pending_header), }; - let header_verifier = - HeaderVerifier::new(&median_time_context, &shared.consensus()); + let header_verifier = HeaderVerifier::new(&median_time_context, shared.consensus()); if let Err(err) = header_verifier.verify(&header) { if err .downcast_ref::() diff --git a/sync/src/relayer/get_block_proposal_process.rs b/sync/src/relayer/get_block_proposal_process.rs index 52d1c798bd..7c6ab110f0 100644 --- a/sync/src/relayer/get_block_proposal_process.rs +++ b/sync/src/relayer/get_block_proposal_process.rs @@ -61,7 +61,7 @@ impl<'a> GetBlockProposalProcess<'a> { // Transactions that do not exist on this node let not_exist_proposals: Vec = proposals .into_iter() - .filter(|short_id| !fetched_transactions.contains_key(&short_id)) + .filter(|short_id| !fetched_transactions.contains_key(short_id)) .collect(); // Cache request, try process on timer diff --git a/sync/src/relayer/tests/block_proposal_process.rs b/sync/src/relayer/tests/block_proposal_process.rs index f9a84b42d6..e88ec876ad 100644 --- a/sync/src/relayer/tests/block_proposal_process.rs +++ b/sync/src/relayer/tests/block_proposal_process.rs @@ -38,7 +38,7 @@ fn 
test_no_asked() { assert_eq!(process.execute(), Status::ignored()); let known = relayer.shared.state().already_known_tx(&transaction.hash()); - assert_eq!(known, false); + assert!(!known); } #[test] @@ -67,7 +67,7 @@ fn test_ok() { assert_eq!(process.execute(), Status::ok()); let known = relayer.shared.state().already_known_tx(&transaction.hash()); - assert_eq!(known, true); + assert!(known); } #[test] diff --git a/sync/src/relayer/tests/block_transactions_process.rs b/sync/src/relayer/tests/block_transactions_process.rs index 9814918bef..afcfe98e4c 100644 --- a/sync/src/relayer/tests/block_transactions_process.rs +++ b/sync/src/relayer/tests/block_transactions_process.rs @@ -14,7 +14,6 @@ use ckb_types::{ }, }; use std::collections::HashMap; -use std::iter::FromIterator; use std::sync::Arc; #[test] diff --git a/sync/src/relayer/tests/block_transactions_verifier.rs b/sync/src/relayer/tests/block_transactions_verifier.rs index c8e214ad13..16d49ac962 100644 --- a/sync/src/relayer/tests/block_transactions_verifier.rs +++ b/sync/src/relayer/tests/block_transactions_verifier.rs @@ -1,15 +1,12 @@ use super::helper::new_index_transaction; use crate::relayer::block_transactions_verifier::BlockTransactionsVerifier; use crate::{Status, StatusCode}; -use ckb_types::packed::{CompactBlock, CompactBlockBuilder, IndexTransaction}; +use ckb_types::packed::{CompactBlock, CompactBlockBuilder}; use ckb_types::prelude::*; // block_short_ids: vec![None, Some(1), None, Some(3), Some(4), None] fn build_compact_block() -> CompactBlock { - let prefilled: Vec = vec![0, 2, 5] - .into_iter() - .map(new_index_transaction) - .collect(); + let prefilled_iter = vec![0, 2, 5].into_iter().map(new_index_transaction); let short_ids = vec![1, 3, 4] .into_iter() @@ -18,7 +15,7 @@ fn build_compact_block() -> CompactBlock { CompactBlockBuilder::default() .short_ids(short_ids.pack()) - .prefilled_transactions(prefilled.into_iter().pack()) + .prefilled_transactions(prefilled_iter.pack()) .build() } diff 
--git a/sync/src/relayer/tests/compact_block_process.rs b/sync/src/relayer/tests/compact_block_process.rs index 805eb4bef3..5b463d5717 100644 --- a/sync/src/relayer/tests/compact_block_process.rs +++ b/sync/src/relayer/tests/compact_block_process.rs @@ -13,7 +13,6 @@ use ckb_types::{ }; use faketime::unix_time_as_millis; use std::collections::{HashMap, HashSet}; -use std::iter::FromIterator; use std::sync::Arc; #[test] @@ -26,7 +25,7 @@ fn test_in_block_status_map() { .get_block_hash(4) .and_then(|block_hash| shared.store().get_block(&block_hash)) .unwrap(); - new_header_builder(&relayer.shared.shared(), &parent.header()).build() + new_header_builder(relayer.shared.shared(), &parent.header()).build() }; let block = BlockBuilder::default() .transaction(TransactionBuilder::default().build()) diff --git a/sync/src/relayer/tests/helper.rs b/sync/src/relayer/tests/helper.rs index 901f036795..58167b6298 100644 --- a/sync/src/relayer/tests/helper.rs +++ b/sync/src/relayer/tests/helper.rs @@ -50,7 +50,7 @@ pub(crate) fn new_header_builder(shared: &Shared, parent: &HeaderView) -> Header let snapshot = shared.snapshot(); let epoch = snapshot .consensus() - .next_epoch_ext(&parent, &snapshot.as_data_provider()) + .next_epoch_ext(parent, &snapshot.as_data_provider()) .unwrap() .epoch(); HeaderBuilder::default() diff --git a/sync/src/relayer/transaction_hashes_process.rs b/sync/src/relayer/transaction_hashes_process.rs index e7baa7d2d5..3dda03611c 100644 --- a/sync/src/relayer/transaction_hashes_process.rs +++ b/sync/src/relayer/transaction_hashes_process.rs @@ -41,7 +41,7 @@ impl<'a> TransactionHashesProcess<'a> { .tx_hashes() .iter() .map(|x| x.to_entity()) - .filter(|tx_hash| !tx_filter.contains(&tx_hash)) + .filter(|tx_hash| !tx_filter.contains(tx_hash)) .collect() }; diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index bb58043d26..647c7587de 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ 
b/sync/src/synchronizer/block_fetcher.rs @@ -36,7 +36,7 @@ impl<'a> BlockFetcher<'a> { } pub fn is_better_chain(&self, header: &HeaderView) -> bool { - header.is_better_than(&self.active_chain.total_difficulty()) + header.is_better_than(self.active_chain.total_difficulty()) } pub fn peer_best_known_header(&self) -> Option { @@ -60,7 +60,7 @@ impl<'a> BlockFetcher<'a> { // of its current tip anymore. Go back enough to fix that. last_common = self .active_chain - .last_common_ancestor(&last_common, &best_known.inner())?; + .last_common_ancestor(&last_common, best_known.inner())?; self.synchronizer .peers() diff --git a/sync/src/synchronizer/headers_process.rs b/sync/src/synchronizer/headers_process.rs index 1c6bbbb76d..13f289a03e 100644 --- a/sync/src/synchronizer/headers_process.rs +++ b/sync/src/synchronizer/headers_process.rs @@ -54,7 +54,7 @@ impl<'a> HeadersProcess<'a> { pub fn accept_first(&self, first: &core::HeaderView) -> ValidationResult { let shared: &SyncShared = self.synchronizer.shared(); - let verifier = HeaderVerifier::new(shared, &shared.consensus()); + let verifier = HeaderVerifier::new(shared, shared.consensus()); let acceptor = HeaderAcceptor::new(first, self.peer, verifier, self.active_chain.clone()); acceptor.accept() } @@ -148,7 +148,7 @@ impl<'a> HeadersProcess<'a> { for header in headers.iter().skip(1) { let verifier = HeaderVerifier::new(shared, consensus); let acceptor = - HeaderAcceptor::new(&header, self.peer, verifier, self.active_chain.clone()); + HeaderAcceptor::new(header, self.peer, verifier, self.active_chain.clone()); let result = acceptor.accept(); match result.state { ValidationState::Invalid => { @@ -327,7 +327,7 @@ impl<'a, DL: HeaderProvider> HeaderAcceptor<'a, DL> { return result; } - shared.insert_valid_header(self.peer, &self.header); + shared.insert_valid_header(self.peer, self.header); result } } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 0e8d850be6..4fd685e23c 100644 --- 
a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -338,7 +338,7 @@ impl Synchronizer { peer: PeerIndex, ibd: IBDState, ) -> Option>> { - BlockFetcher::new(&self, peer, ibd).fetch() + BlockFetcher::new(self, peer, ibd).fetch() } pub(crate) fn on_connected(&self, nc: &dyn CKBProtocolContext, peer: PeerIndex) { diff --git a/sync/src/tests/block_status.rs b/sync/src/tests/block_status.rs index bf8a76b3ab..22fb317072 100644 --- a/sync/src/tests/block_status.rs +++ b/sync/src/tests/block_status.rs @@ -14,11 +14,9 @@ fn all() -> Vec { } fn assert_contain(includes: Vec, target: BlockStatus) { - let excludes: Vec = all() - .into_iter() - .filter(|s1| !includes.iter().any(|s2| s2 == s1)) - .collect(); - includes.into_iter().for_each(|status| { + let all = all(); + let excludes = all.iter().filter(|s1| !includes.iter().any(|s2| &s2 == s1)); + includes.iter().for_each(|status| { assert!( status.contains(target), "{:?} should contains {:?}", diff --git a/sync/src/tests/inflight_blocks.rs b/sync/src/tests/inflight_blocks.rs index 8924e6370a..e2509d8519 100644 --- a/sync/src/tests/inflight_blocks.rs +++ b/sync/src/tests/inflight_blocks.rs @@ -3,7 +3,6 @@ use ckb_constant::sync::BLOCK_DOWNLOAD_TIMEOUT; use ckb_types::h256; use ckb_types::prelude::*; use std::collections::HashSet; -use std::iter::FromIterator; #[test] fn inflight_blocks_count() { diff --git a/sync/src/tests/sync_shared.rs b/sync/src/tests/sync_shared.rs index ab80fdc6a7..0543d4bf69 100644 --- a/sync/src/tests/sync_shared.rs +++ b/sync/src/tests/sync_shared.rs @@ -18,18 +18,12 @@ fn test_insert_new_block() { Arc::new(next_block) }; - assert_eq!( - shared - .insert_new_block(&chain, Arc::clone(&new_block)) - .expect("insert valid block"), - true, - ); - assert_eq!( - shared - .insert_new_block(&chain, Arc::clone(&new_block)) - .expect("insert duplicated valid block"), - false, - ); + assert!(shared + .insert_new_block(&chain, Arc::clone(&new_block)) + .expect("insert valid block"),); + 
assert!(!shared + .insert_new_block(&chain, Arc::clone(&new_block)) + .expect("insert duplicated valid block"),); } #[test] @@ -95,18 +89,12 @@ fn test_insert_parent_unknown_block() { let invalid_hash = invalid_orphan.header().hash(); let parent_hash = parent.header().hash(); - assert_eq!( - shared - .insert_new_block(&chain, Arc::clone(&valid_orphan)) - .expect("insert orphan block"), - false, - ); - assert_eq!( - shared - .insert_new_block(&chain, Arc::clone(&invalid_orphan)) - .expect("insert orphan block"), - false, - ); + assert!(!shared + .insert_new_block(&chain, Arc::clone(&valid_orphan)) + .expect("insert orphan block"),); + assert!(!shared + .insert_new_block(&chain, Arc::clone(&invalid_orphan)) + .expect("insert orphan block"),); assert_eq!( shared.active_chain().get_block_status(&valid_hash), BlockStatus::BLOCK_RECEIVED @@ -117,12 +105,9 @@ fn test_insert_parent_unknown_block() { ); // After inserting parent of an orphan block - assert_eq!( - shared - .insert_new_block(&chain, Arc::clone(&parent)) - .expect("insert parent of orphan block"), - true, - ); + assert!(shared + .insert_new_block(&chain, Arc::clone(&parent)) + .expect("insert parent of orphan block"),); assert_eq!( shared.active_chain().get_block_status(&valid_hash), BlockStatus::BLOCK_VALID @@ -162,12 +147,9 @@ fn test_switch_valid_fork() { let mut valid_fork = Vec::new(); for _ in 2..shared.active_chain().tip_number() { let block = make_valid_block(shared.shared(), parent_hash.clone()); - assert_eq!( - shared - .insert_new_block(&chain, Arc::new(block.clone())) - .expect("insert fork"), - true, - ); + assert!(shared + .insert_new_block(&chain, Arc::new(block.clone())) + .expect("insert fork"),); parent_hash = block.header().hash(); valid_fork.push(block); @@ -185,12 +167,9 @@ fn test_switch_valid_fork() { // Make the fork switch as the main chain. 
for _ in tip_number..tip_number + 2 { let block = inherit_block(shared.shared(), &parent_hash.clone()).build(); - assert_eq!( - shared - .insert_new_block(&chain, Arc::new(block.clone())) - .expect("insert fork"), - true, - ); + assert!(shared + .insert_new_block(&chain, Arc::new(block.clone())) + .expect("insert fork"),); parent_hash = block.header().hash(); valid_fork.push(block); diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index b6263920ec..fe4502763c 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -42,7 +42,7 @@ use crate::{ fn start_chain(consensus: Option) -> (ChainController, Shared, Synchronizer) { let mut builder = SharedBuilder::with_temp_db(); - let consensus = consensus.unwrap_or_else(Default::default); + let consensus = consensus.unwrap_or_default(); builder = builder.consensus(consensus); let (shared, mut pack) = builder.build().unwrap(); @@ -530,7 +530,7 @@ fn test_sync_process() { let locator1 = synchronizer1 .shared .active_chain() - .get_locator(&shared1.snapshot().tip_header()); + .get_locator(shared1.snapshot().tip_header()); for i in 1..=num { let j = if i > 192 { i + 1 } else { i }; @@ -765,14 +765,11 @@ fn test_chain_sync_timeout() { synchronizer.eviction(&network_context); { // Protected peer 0 still in sync state - assert_eq!( - peers - .state - .get(&sync_protected_peer) - .unwrap() - .sync_started(), - true - ); + assert!(peers + .state + .get(&sync_protected_peer) + .unwrap() + .sync_started(),); assert_eq!( synchronizer .shared() @@ -864,14 +861,11 @@ fn test_chain_sync_timeout() { synchronizer.eviction(&network_context); { // Protected peer 0 chain_sync timeout - assert_eq!( - peers - .state - .get(&sync_protected_peer) - .unwrap() - .sync_started(), - false - ); + assert!(!peers + .state + .get(&sync_protected_peer) + .unwrap() + .sync_started(),); assert_eq!( synchronizer .shared() @@ -961,14 +955,11 @@ fn 
test_n_sync_started() { synchronizer.eviction(&network_context); { // Protected peer 0 chain_sync timeout - assert_eq!( - peers - .state - .get(&sync_protected_peer) - .unwrap() - .sync_started(), - false - ); + assert!(!peers + .state + .get(&sync_protected_peer) + .unwrap() + .sync_started(),); assert_eq!( synchronizer .shared() diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 62e18b86f5..76e2eb844d 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -664,7 +664,7 @@ impl InflightBlocks { } }; if !trace.is_empty() { - trace.remove(&key); + trace.remove(key); } remove_key.push(key.clone()); } @@ -827,7 +827,7 @@ impl Peers { pub fn may_set_best_known_header(&self, peer: PeerIndex, header_view: HeaderView) { if let Some(mut peer_state) = self.state.get_mut(&peer) { if let Some(ref hv) = peer_state.best_known_header { - if header_view.is_better_than(&hv.total_difficulty()) { + if header_view.is_better_than(hv.total_difficulty()) { peer_state.best_known_header = Some(header_view); } } else { @@ -1314,7 +1314,7 @@ impl SyncShared { } }; if let Err(ref error) = ret { - if !is_internal_db_error(&error) { + if !is_internal_db_error(error) { error!("accept block {:?} {}", block, error); self.state .insert_block_status(block.header().hash(), BlockStatus::BLOCK_INVALID); @@ -1385,7 +1385,7 @@ impl SyncShared { .get_block_header(hash) .and_then(|header| { store - .get_block_ext(&hash) + .get_block_ext(hash) .map(|block_ext| HeaderView::new(header, block_ext.total_difficulty)) }) .or_else(|| self.state.header_map.get(hash)) @@ -1393,7 +1393,7 @@ impl SyncShared { self.state.header_map.get(hash).or_else(|| { store.get_block_header(hash).and_then(|header| { store - .get_block_ext(&hash) + .get_block_ext(hash) .map(|block_ext| HeaderView::new(header, block_ext.total_difficulty)) }) }) @@ -1402,13 +1402,13 @@ impl SyncShared { /// Check whether block has been inserted to chain store pub fn is_stored(&self, hash: &packed::Byte32) -> bool { - let 
status = self.active_chain().get_block_status(&hash); + let status = self.active_chain().get_block_status(hash); status.contains(BlockStatus::BLOCK_STORED) } /// Get epoch ext by block hash pub fn get_epoch_ext(&self, hash: &Byte32) -> Option { - self.store().get_block_epoch(&hash) + self.store().get_block_epoch(hash) } } @@ -1582,7 +1582,7 @@ impl SyncState { } pub fn may_set_shared_best_header(&self, header: HeaderView) { - if !header.is_better_than(&self.shared_best_header.read().total_difficulty()) { + if !header.is_better_than(self.shared_best_header.read().total_difficulty()) { return; } diff --git a/test/Cargo.toml b/test/Cargo.toml index 23d9296b7e..9c5d345936 100644 --- a/test/Cargo.toml +++ b/test/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-test" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "CKB integration tests." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/test/src/net.rs b/test/src/net.rs index 407987c069..0fd791e6dd 100644 --- a/test/src/net.rs +++ b/test/src/net.rs @@ -221,9 +221,7 @@ impl CKBProtocolHandler for DummyProtocolHandler { .unwrap_or_default(); let (sender, receiver) = unbounded(); let mut senders = self.senders.lock(); - if !senders.contains_key(&peer_index) { - senders.insert(peer_index, sender); - } + senders.entry(peer_index).or_insert(sender); let _ = self.register_tx.send((node_id, peer_index, receiver)); } diff --git a/test/src/specs/dao/dao_verifier.rs b/test/src/specs/dao/dao_verifier.rs index 11805641ff..e59e8c8d95 100644 --- a/test/src/specs/dao/dao_verifier.rs +++ b/test/src/specs/dao/dao_verifier.rs @@ -8,7 +8,6 @@ use ckb_types::packed::{Byte32, CellOutput, OutPoint}; use ckb_types::prelude::Unpack; use ckb_util::Mutex; use std::collections::HashMap; -use std::convert::TryFrom; #[derive(Default)] #[allow(non_snake_case)] diff --git a/test/src/specs/dao/utils.rs 
b/test/src/specs/dao/utils.rs index d5dd0eabea..82b0334427 100644 --- a/test/src/specs/dao/utils.rs +++ b/test/src/specs/dao/utils.rs @@ -9,7 +9,7 @@ pub(crate) fn ensure_committed(node: &Node, transaction: &TransactionView) -> Ou let commit_elapsed = node.consensus().tx_proposal_window().closest() + 2; node.rpc_client() .send_transaction(transaction.data().into()); - mine(&node, commit_elapsed); + mine(node, commit_elapsed); assert!(is_transaction_committed(node, transaction)); OutPoint::new(transaction.hash(), 0) } @@ -25,6 +25,6 @@ pub(crate) fn goto_target_point(node: &Node, target_point: EpochNumberWithFracti break; } - mine(&node, 1); + mine(node, 1); } } diff --git a/test/src/specs/hardfork/v2021/cell_deps.rs b/test/src/specs/hardfork/v2021/cell_deps.rs index e5c0de0e67..d867b4043f 100644 --- a/test/src/specs/hardfork/v2021/cell_deps.rs +++ b/test/src/specs/hardfork/v2021/cell_deps.rs @@ -775,7 +775,7 @@ impl<'a> CheckCellDepsTestRunner<'a> { .build(); self.adjust_tip_before_test(); if let Some(errmsg) = expected.error_message() { - assert_send_transaction_fail(self.node, &tx, &errmsg); + assert_send_transaction_fail(self.node, &tx, errmsg); } else { self.submit_transaction_until_committed(&tx); } diff --git a/test/src/specs/hardfork/v2021/delay_txs.rs b/test/src/specs/hardfork/v2021/delay_txs.rs index eaf74d7ec2..93b3795956 100644 --- a/test/src/specs/hardfork/v2021/delay_txs.rs +++ b/test/src/specs/hardfork/v2021/delay_txs.rs @@ -37,12 +37,12 @@ impl Spec for DelayTxs { let ret = node.rpc_client().get_transaction(tx.hash()); assert!(ret.is_none(), "tx should be delayed"); - mine(&node, 1); + mine(node, 1); } // tx should be processed after delay_windows // but in order to avoid asynchronous non-determinism // we check in next block. 
- mine(&node, 1); + mine(node, 1); node.wait_for_tx_pool(); node.assert_tx_pool_size(delay_windows, 0); } diff --git a/test/src/specs/hardfork/v2021/header_deps.rs b/test/src/specs/hardfork/v2021/header_deps.rs index f5a6790ece..30f5f69c3d 100644 --- a/test/src/specs/hardfork/v2021/header_deps.rs +++ b/test/src/specs/hardfork/v2021/header_deps.rs @@ -189,7 +189,7 @@ impl<'a> ImmatureHeaderDepsTestRunner<'a> { fn test_send(&self, tx: &TransactionView, expected: ExpectedResult) { if let Some(errmsg) = expected.error_message() { - assert_send_transaction_fail(self.node, tx, &errmsg); + assert_send_transaction_fail(self.node, tx, errmsg); } else { self.submit_transaction_until_committed(tx); } diff --git a/test/src/specs/hardfork/v2021/since.rs b/test/src/specs/hardfork/v2021/since.rs index d7a7a24e70..c5356e873e 100644 --- a/test/src/specs/hardfork/v2021/since.rs +++ b/test/src/specs/hardfork/v2021/since.rs @@ -63,13 +63,13 @@ impl Spec for CheckAbsoluteEpochSince { let res = node.rpc_client().send_transaction_result(tx.data().into()); assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); } - mine(&node, 1); + mine(node, 1); { info!("CKB v2019, since absolute epoch failed (boundary, malformed)"); let tx = create_tx_since_absolute_epoch(node, 0, (epoch_length - 1) + epoch_length); assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); } - mine(&node, 1); + mine(node, 1); assert_epoch_should_be(node, 2, 0, epoch_length); { info!("CKB v2021, since absolute epoch failed (boundary, malformed)"); @@ -119,7 +119,7 @@ impl Spec for CheckAbsoluteEpochSince { let res = node.rpc_client().send_transaction_result(tx.data().into()); assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); } - mine(&node, 1); + mine(node, 1); { info!("CKB v2021, since absolute epoch ok"); let tx = create_tx_since_absolute_epoch(node, 3, 1); @@ -152,9 +152,9 @@ impl Spec for CheckRelativeEpochSince { { info!("CKB v2019, since relative epoch failed"); let tx = 
create_tx_since_relative_epoch(node, 1, 0); - mine(&node, epoch_length - 1); + mine(node, epoch_length - 1); assert_send_transaction_fail(node, &tx, ERROR_IMMATURE); - mine(&node, 1); + mine(node, 1); info!("CKB v2019, since relative epoch ok"); let res = node.rpc_client().send_transaction_result(tx.data().into()); assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); @@ -163,9 +163,9 @@ impl Spec for CheckRelativeEpochSince { { info!("CKB v2019, since relative epoch failed (malformed)"); let tx = create_tx_since_relative_epoch(node, 0, epoch_length); - mine(&node, epoch_length - 1); + mine(node, epoch_length - 1); assert_send_transaction_fail(node, &tx, ERROR_IMMATURE); - mine(&node, 1); + mine(node, 1); info!("CKB v2019, since relative epoch ok (malformed)"); let res = node.rpc_client().send_transaction_result(tx.data().into()); assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); @@ -174,28 +174,28 @@ impl Spec for CheckRelativeEpochSince { { info!("CKB v2019, since relative epoch ok (index=length=0)"); let tx = create_tx_since_relative_epoch_with_length(node, 1, 0, 0); - mine(&node, epoch_length); + mine(node, epoch_length); let res = node.rpc_client().send_transaction_result(tx.data().into()); assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); } { info!("CKB v2019, since relative epoch ok (index>length=0)"); let tx = create_tx_since_relative_epoch_with_length(node, 1, 1, 0); - mine(&node, epoch_length); + mine(node, epoch_length); let res = node.rpc_client().send_transaction_result(tx.data().into()); assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); } assert_epoch_should_be(node, 5, epoch_length - 4, epoch_length); { let tx1 = create_tx_since_relative_epoch(node, 0, epoch_length); - mine(&node, 1); + mine(node, 1); let tx2 = create_tx_since_relative_epoch(node, 0, epoch_length); let tx3 = create_tx_since_relative_epoch_with_length(node, 1, 1, 0); - mine(&node, epoch_length - 2); + mine(node, epoch_length - 2); info!("CKB v2019, since relative 
epoch failed (boundary, malformed)"); assert_send_transaction_fail(node, &tx1, ERROR_IMMATURE); - mine(&node, 1); + mine(node, 1); info!("CKB v2019, since relative epoch ok (boundary, malformed)"); let res = node.rpc_client().send_transaction_result(tx1.data().into()); assert!(res.is_ok(), "result: {:?}", res.unwrap_err()); @@ -206,7 +206,7 @@ impl Spec for CheckRelativeEpochSince { info!("CKB v2019, since relative epoch failed (boundary, index>length=0)"); assert_send_transaction_fail(node, &tx3, ERROR_IMMATURE); - mine(&node, 1); + mine(node, 1); info!("CKB v2019, since relative epoch failed (boundary, malformed)"); assert_send_transaction_fail(node, &tx2, ERROR_INVALID_SINCE); @@ -217,18 +217,18 @@ impl Spec for CheckRelativeEpochSince { info!("CKB v2019, since relative epoch transaction will be committed (boundary, malformed)"); assert_epoch_should_be(node, 6, epoch_length - 3, epoch_length); assert!(check::is_transaction_pending(node, &tx1)); - mine(&node, 1); + mine(node, 1); assert!(check::is_transaction_proposed(node, &tx1)); - mine(&node, 1); + mine(node, 1); assert!(check::is_transaction_committed(node, &tx1)); assert_epoch_should_be(node, 6, epoch_length - 1, epoch_length); } { info!("CKB v2021, since relative epoch failed (malformed)"); let tx = create_tx_since_relative_epoch(node, 0, epoch_length); - mine(&node, epoch_length - 1); + mine(node, epoch_length - 1); assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); - mine(&node, 1); + mine(node, 1); info!("CKB v2021, since relative epoch failed (malformed)"); assert_send_transaction_fail(node, &tx, ERROR_INVALID_SINCE); } @@ -236,7 +236,7 @@ impl Spec for CheckRelativeEpochSince { let tx1 = create_tx_since_relative_epoch_with_length(node, 1, 1, 0); let tx2 = create_tx_since_relative_epoch_with_length(node, 1, 0, 0); - mine(&node, epoch_length); + mine(node, epoch_length); info!("CKB v2021, since relative epoch failed (index>length=0)"); assert_send_transaction_fail(node, &tx1, 
ERROR_INVALID_SINCE); diff --git a/test/src/specs/hardfork/v2021/vm_b_extension.rs b/test/src/specs/hardfork/v2021/vm_b_extension.rs index 4eb1caf951..71e2f4f616 100644 --- a/test/src/specs/hardfork/v2021/vm_b_extension.rs +++ b/test/src/specs/hardfork/v2021/vm_b_extension.rs @@ -267,7 +267,7 @@ impl<'a> CheckVmBExtensionTestRunner<'a> { .output_data(Default::default()) .build(); if let Some(errmsg) = expected.error_message() { - assert_send_transaction_fail(self.node, &tx, &errmsg); + assert_send_transaction_fail(self.node, &tx, errmsg); } else { self.submit_transaction_until_committed(&tx); } diff --git a/test/src/specs/hardfork/v2021/vm_version.rs b/test/src/specs/hardfork/v2021/vm_version.rs index 073c230b56..9ebabeea61 100644 --- a/test/src/specs/hardfork/v2021/vm_version.rs +++ b/test/src/specs/hardfork/v2021/vm_version.rs @@ -257,7 +257,7 @@ impl<'a> CheckVmVersionTestRunner<'a> { .output_data(Default::default()) .build(); if let Some(errmsg) = expected.error_message() { - assert_send_transaction_fail(self.node, &tx, &errmsg); + assert_send_transaction_fail(self.node, &tx, errmsg); None } else { self.submit_transaction_until_committed(&tx); @@ -288,7 +288,7 @@ impl<'a> CheckVmVersionTestRunner<'a> { .output_data(Default::default()) .build(); if let Some(errmsg) = expected.error_message() { - assert_send_transaction_fail(self.node, &tx, &errmsg); + assert_send_transaction_fail(self.node, &tx, errmsg); } else { self.submit_transaction_until_committed(&tx); } diff --git a/test/src/specs/mining/basic.rs b/test/src/specs/mining/basic.rs index 08446f54ae..77f4ecee2f 100644 --- a/test/src/specs/mining/basic.rs +++ b/test/src/specs/mining/basic.rs @@ -22,7 +22,7 @@ impl Spec for MiningBasic { let transaction = always_success_transaction(node, &cells[0]); node.submit_transaction(&transaction); - mine(&node, 1); + mine(node, 1); let block1 = node.get_tip_block(); assert_eq!( @@ -31,7 +31,7 @@ impl Spec for MiningBasic { ); // skip (proposal_window.closest - 1) block - 
mine(&node, DEFAULT_TX_PROPOSAL_WINDOW.0); + mine(node, DEFAULT_TX_PROPOSAL_WINDOW.0); let block3 = node.get_tip_block(); assert_eq!(block3.get_commit_tx_ids(), transaction.get_commit_tx_ids()); diff --git a/test/src/specs/mining/bootstrap.rs b/test/src/specs/mining/bootstrap.rs index e0c3360b4d..f739f7babb 100644 --- a/test/src/specs/mining/bootstrap.rs +++ b/test/src/specs/mining/bootstrap.rs @@ -40,7 +40,7 @@ impl Spec for BootstrapCellbase { ); }); - mine(&node, 1); + mine(node, 1); let blk = node.get_tip_block(); assert!( blk.transactions()[0].is_cellbase() diff --git a/test/src/specs/mining/fee.rs b/test/src/specs/mining/fee.rs index 802e6f5861..7a651bd647 100644 --- a/test/src/specs/mining/fee.rs +++ b/test/src/specs/mining/fee.rs @@ -179,14 +179,14 @@ impl Spec for ProposeButNotCommit { let transaction = always_success_transaction(feed_node, &cells[0]); let txs = vec![transaction]; feed_node.submit_transaction(&txs[0]); - mine(&feed_node, 1); + mine(feed_node, 1); let feed_blocks: Vec<_> = (1..feed_node.get_tip_block_number()) .map(|number| feed_node.get_block_by_number(number)) .collect(); feed_blocks.iter().for_each(|block| { - target_node.submit_block(&block); + target_node.submit_block(block); }); mine(target_node, 2 * FINALIZATION_DELAY_LENGTH); @@ -212,7 +212,7 @@ impl Spec for ProposeDuplicated { .proposal(tx.proposal_short_id()) .build() .as_uncle(); - mine(&node, 1); + mine(node, 1); uncle }; let uncle2 = { @@ -222,7 +222,7 @@ impl Spec for ProposeDuplicated { .nonce(99999.pack()) .build() .as_uncle(); - mine(&node, 1); + mine(node, 1); uncle }; @@ -231,7 +231,7 @@ impl Spec for ProposeDuplicated { .uncle(uncle1) .uncle(uncle2) .build(); - node.submit_transaction(&tx); + node.submit_transaction(tx); node.submit_block(&block); mine(node, 2 * FINALIZATION_DELAY_LENGTH); diff --git a/test/src/specs/mining/uncle.rs b/test/src/specs/mining/uncle.rs index 939faae77b..f302b39f16 100644 --- a/test/src/specs/mining/uncle.rs +++ 
b/test/src/specs/mining/uncle.rs @@ -183,9 +183,9 @@ impl Spec for PackUnclesIntoEpochStarting { // Convenient way to construct an uncle block fn construct_uncle(node: &Node) -> BlockView { - mine(&node, 1); // Ensure exit IBD mode + mine(node, 1); // Ensure exit IBD mode let uncle = node.construct_uncle(); - mine(&node, 1); + mine(node, 1); uncle } diff --git a/test/src/specs/p2p/malformed_message.rs b/test/src/specs/p2p/malformed_message.rs index c794bb1af2..c58807451d 100644 --- a/test/src/specs/p2p/malformed_message.rs +++ b/test/src/specs/p2p/malformed_message.rs @@ -21,7 +21,7 @@ impl Spec for MalformedMessage { info!("Test node should receive GetHeaders message from node0"); let ret = net.should_receive(node0, |data: &Bytes| { - SyncMessage::from_slice(&data) + SyncMessage::from_slice(data) .map(|message| message.to_enum().item_name() == GetHeaders::NAME) .unwrap_or(false) }); @@ -61,7 +61,7 @@ impl Spec for MalformedMessageWithWhitelist { info!("Test node should receive GetHeaders message from node0"); let ret = net.should_receive(&node0, |data: &Bytes| { - SyncMessage::from_slice(&data) + SyncMessage::from_slice(data) .map(|message| message.to_enum().item_name() == GetHeaders::NAME) .unwrap_or(false) }); diff --git a/test/src/specs/relay/block_relay.rs b/test/src/specs/relay/block_relay.rs index 82d6453337..d4308f5623 100644 --- a/test/src/specs/relay/block_relay.rs +++ b/test/src/specs/relay/block_relay.rs @@ -16,7 +16,7 @@ impl Spec for RelayTooNewBlock { let node0 = &nodes[0]; let node1 = &nodes[1]; let node2 = &nodes[2]; - out_ibd_mode(&nodes); + out_ibd_mode(nodes); node1.connect(node0); let future = Duration::from_secs(6_000).as_millis() as u64; @@ -32,7 +32,7 @@ impl Spec for RelayTooNewBlock { waiting_for_sync(&[node0, node2]); sleep(15); // GET_HEADERS_TIMEOUT 15s - mine(&node0, 1); + mine(node0, 1); let (rpc_client0, rpc_client1) = (node0.rpc_client(), node1.rpc_client()); let ret = wait_until(20, || { let header0 = rpc_client0.get_tip_header(); 
diff --git a/test/src/specs/relay/compact_block.rs b/test/src/specs/relay/compact_block.rs index d9020a9064..439bcbf918 100644 --- a/test/src/specs/relay/compact_block.rs +++ b/test/src/specs/relay/compact_block.rs @@ -34,7 +34,7 @@ impl Spec for CompactBlockEmptyParentUnknown { ); net.connect(node); - mine(&node, 1); + mine(node, 1); let parent_unknown_block = node .new_block_builder(None, None, None) @@ -54,7 +54,7 @@ impl Spec for CompactBlockEmptyParentUnknown { assert!(!ret, "Node0 should reconstruct empty block failed"); let ret = net.should_receive(node, |data: &Bytes| { - SyncMessage::from_slice(&data) + SyncMessage::from_slice(data) .map(|message| message.to_enum().item_name() == GetHeaders::NAME) .unwrap_or(false) }); @@ -164,7 +164,7 @@ impl Spec for CompactBlockMissingFreshTxs { .transaction(new_tx) .build(); net.send( - &node, + node, SupportProtocols::Relay, build_compact_block(&new_block), ); @@ -172,10 +172,10 @@ impl Spec for CompactBlockMissingFreshTxs { assert!(!ret, "Node0 should be unable to reconstruct the block"); let ret = net.should_receive(node, |data: &Bytes| { - let get_block_txns = RelayMessage::from_slice(&data) + let get_block_txns = RelayMessage::from_slice(data) .map(|message| message.to_enum().item_name() == packed::GetBlockTransactions::NAME) .unwrap_or(false); - let get_block = SyncMessage::from_slice(&data) + let get_block = SyncMessage::from_slice(data) .map(|message| message.to_enum().item_name() == packed::GetBlocks::NAME) .unwrap_or(false); get_block_txns || get_block @@ -385,7 +385,7 @@ impl Spec for CompactBlockLoseGetBlockTransactions { mine(node0, 6); // Make node0 and node1 reach the same height - mine(&node1, 1); + mine(node1, 1); node0.connect(node1); waiting_for_sync(&[node0, node1]); @@ -400,10 +400,10 @@ impl Spec for CompactBlockLoseGetBlockTransactions { net.send(node0, SupportProtocols::Relay, build_compact_block(&block)); let ret = net.should_receive(node0, |data: &Bytes| { - let get_block_txns = 
RelayMessage::from_slice(&data) + let get_block_txns = RelayMessage::from_slice(data) .map(|message| message.to_enum().item_name() == packed::GetBlockTransactions::NAME) .unwrap_or(false); - let get_block = SyncMessage::from_slice(&data) + let get_block = SyncMessage::from_slice(data) .map(|message| message.to_enum().item_name() == packed::GetBlocks::NAME) .unwrap_or(false); get_block_txns || get_block diff --git a/test/src/specs/relay/get_block_proposal_process.rs b/test/src/specs/relay/get_block_proposal_process.rs index 187ab1eebf..9aa81d05ab 100644 --- a/test/src/specs/relay/get_block_proposal_process.rs +++ b/test/src/specs/relay/get_block_proposal_process.rs @@ -52,7 +52,7 @@ impl Spec for ProposalRespondSizelimit { net.connect(node0); - net.send(&node0, SupportProtocols::Relay, message.as_bytes()); + net.send(node0, SupportProtocols::Relay, message.as_bytes()); assert!( node0.rpc_client().get_banned_addresses().is_empty(), diff --git a/test/src/specs/relay/get_block_transactions_process.rs b/test/src/specs/relay/get_block_transactions_process.rs index e917e21b3c..7c5699de74 100644 --- a/test/src/specs/relay/get_block_transactions_process.rs +++ b/test/src/specs/relay/get_block_transactions_process.rs @@ -21,7 +21,7 @@ impl Spec for MissingUncleRequest { ); net.connect(node); - mine(&node, 1); + mine(node, 1); let builder = node.new_block_builder(None, None, None); let block1 = builder.clone().nonce(0.pack()).build(); @@ -45,7 +45,7 @@ impl Spec for MissingUncleRequest { net.send(node, SupportProtocols::Relay, message.as_bytes()); let ret = net.should_receive(node, |data: &Bytes| { - RelayMessage::from_slice(&data) + RelayMessage::from_slice(data) .map(|message| message.to_enum().item_name() == packed::BlockTransactions::NAME) .unwrap_or(false) }); diff --git a/test/src/specs/relay/transaction_relay.rs b/test/src/specs/relay/transaction_relay.rs index 02376f76b7..d3f1dead71 100644 --- a/test/src/specs/relay/transaction_relay.rs +++ 
b/test/src/specs/relay/transaction_relay.rs @@ -63,9 +63,9 @@ impl Spec for TransactionRelayMultiple { }); assert!(relayed, "all transactions should be relayed"); - mine(&node0, 1); - mine(&node0, 1); - mine(&node0, 1); + mine(node0, 1); + mine(node0, 1); + mine(node0, 1); waiting_for_sync(nodes); nodes.iter().for_each(|node| { node.assert_tx_pool_size(0, 0); @@ -119,7 +119,7 @@ pub struct RelayInvalidTransaction; impl Spec for RelayInvalidTransaction { fn run(&self, nodes: &mut Vec) { let node = &nodes.pop().unwrap(); - mine(&node, 4); + mine(node, 4); let mut net = Net::new( self.name(), node.consensus(), @@ -145,7 +145,7 @@ impl Spec for RelayInvalidTransaction { ); info!("Sending RelayTransactions to node"); net.send( - &node, + node, SupportProtocols::Relay, build_relay_txs(&[(dummy_tx, 333)]), ); diff --git a/test/src/specs/rpc/get_blockchain_info.rs b/test/src/specs/rpc/get_blockchain_info.rs index f66ea355d1..23ebb5c66a 100644 --- a/test/src/specs/rpc/get_blockchain_info.rs +++ b/test/src/specs/rpc/get_blockchain_info.rs @@ -73,7 +73,7 @@ impl Spec for RpcGetBlockchainInfo { check_median_time(blockchain_info, node0); // mine 1 block to make tip_block_number is even - mine(&node0, 1); + mine(node0, 1); let blockchain_info = node0.rpc_client().get_blockchain_info(); assert_eq!( 2, @@ -85,7 +85,7 @@ impl Spec for RpcGetBlockchainInfo { check_median_time(blockchain_info, node0); // mine epoch_length blocks to make epoch number change - mine(&node0, epoch_length); + mine(node0, epoch_length); let blockchain_info = node0.rpc_client().get_blockchain_info(); assert_eq!( 1, diff --git a/test/src/specs/rpc/transaction_proof.rs b/test/src/specs/rpc/transaction_proof.rs index de92244522..c449047107 100644 --- a/test/src/specs/rpc/transaction_proof.rs +++ b/test/src/specs/rpc/transaction_proof.rs @@ -7,11 +7,11 @@ pub struct RpcTransactionProof; impl Spec for RpcTransactionProof { fn run(&self, nodes: &mut Vec) { let node0 = &nodes[0]; - mine(&node0, 
DEFAULT_TX_PROPOSAL_WINDOW.1 + 2); + mine(node0, DEFAULT_TX_PROPOSAL_WINDOW.1 + 2); let tx_hash = node0.generate_transaction().unpack(); let tx_hashes = vec![tx_hash]; - mine(&node0, 3); + mine(node0, 3); let proof = node0 .rpc_client() .inner() diff --git a/test/src/specs/rpc/truncate.rs b/test/src/specs/rpc/truncate.rs index b20d723b15..609af19bf9 100644 --- a/test/src/specs/rpc/truncate.rs +++ b/test/src/specs/rpc/truncate.rs @@ -17,9 +17,9 @@ impl Spec for RpcTruncate { let to_truncate = node.get_block_by_number(node.get_tip_block_number()).hash(); - node.submit_transaction(&tx1); + node.submit_transaction(tx1); mine(node, 3); - node.submit_transaction(&tx2); + node.submit_transaction(tx2); // tx1 is already committed on chain, tx2 is still in tx-pool. @@ -68,7 +68,7 @@ impl Spec for RpcTruncate { // The chain can generate new blocks mine(node, 3); - node.submit_transaction(&tx1); + node.submit_transaction(tx1); mine(node, 3); let cell1 = node .rpc_client() diff --git a/test/src/specs/sync/block_sync.rs b/test/src/specs/sync/block_sync.rs index 1213035e5b..feb2b384b0 100644 --- a/test/src/specs/sync/block_sync.rs +++ b/test/src/specs/sync/block_sync.rs @@ -29,7 +29,7 @@ impl Spec for BlockSyncFromOne { assert_eq!(0, rpc_client1.get_tip_block_number()); (0..3).for_each(|_| { - mine(&node0, 1); + mine(node0, 1); }); node1.connect(node0); @@ -450,7 +450,7 @@ impl Spec for SyncTooNewBlock { node2.disconnect(node0); sleep(15); // GET_HEADERS_TIMEOUT 15s - mine(&node0, 1); + mine(node0, 1); let ret = wait_until(20, || { let header0 = rpc_client0.get_tip_header(); let header1 = rpc_client1.get_tip_header(); @@ -482,7 +482,7 @@ impl Spec for HeaderSyncCycle { .as_bytes(); let ret = net.should_receive(node0, |data: &Bytes| { - SyncMessage::from_slice(&data) + SyncMessage::from_slice(data) .map(|message| matches!(message.to_enum(), packed::SyncMessageUnion::GetHeaders(_))) .unwrap_or(false) }); @@ -491,7 +491,7 @@ impl Spec for HeaderSyncCycle { net.send(node0, 
SupportProtocols::Sync, msg); let ret = net.should_receive(node0, |data: &Bytes| { - SyncMessage::from_slice(&data) + SyncMessage::from_slice(data) .map(|message| matches!(message.to_enum(), packed::SyncMessageUnion::GetHeaders(_))) .unwrap_or(false) }); @@ -528,7 +528,7 @@ fn sync_get_blocks(net: &Net, node: &Node, hashes: &[Byte32]) { fn should_receive_get_blocks_message(net: &Net, node: &Node, last_block_hash: Byte32) { let ret = net.should_receive(node, |data: &Bytes| { - SyncMessage::from_slice(&data) + SyncMessage::from_slice(data) .map(|message| match message.to_enum() { packed::SyncMessageUnion::GetBlocks(get_blocks) => { let block_hashes = get_blocks.block_hashes(); diff --git a/test/src/specs/sync/chain_forks.rs b/test/src/specs/sync/chain_forks.rs index 0d1f4a42a2..93b1e6171f 100644 --- a/test/src/specs/sync/chain_forks.rs +++ b/test/src/specs/sync/chain_forks.rs @@ -561,8 +561,8 @@ impl Spec for ForksContainSameUncle { info!("(1) Construct an uncle before fork point"); let uncle = node_a.construct_uncle(); - mine(&node_a, 1); - mine(&node_b, 1); + mine(node_a, 1); + mine(node_b, 1); info!("(2) Add `uncle` into different forks in node_a and node_b"); node_a.submit_block(&uncle); @@ -580,7 +580,7 @@ impl Spec for ForksContainSameUncle { node_b.submit_block(&block_b); info!("(3) Make node_b's fork longer(to help check whether is synchronized)"); - mine(&node_b, 1); + mine(node_b, 1); info!("(4) Connect node_a and node_b, expect that they sync into convergence"); node_a.connect(node_b); diff --git a/test/src/specs/sync/invalid_locator_size.rs b/test/src/specs/sync/invalid_locator_size.rs index b8ec7d9895..e11262ca30 100644 --- a/test/src/specs/sync/invalid_locator_size.rs +++ b/test/src/specs/sync/invalid_locator_size.rs @@ -40,11 +40,10 @@ impl Spec for InvalidLocatorSize { assert!(ret, "Node0 should disconnect test node"); let ret = wait_until(10, || { - net.controller() + !net.controller() .connected_peers() .iter() - .find(|(_, peer)| 
peer.connected_addr.to_string() == node0.p2p_address()) - .is_none() + .any(|(_, peer)| peer.connected_addr.to_string() == node0.p2p_address()) }); assert!( ret, diff --git a/test/src/specs/tx_pool/cellbase_maturity.rs b/test/src/specs/tx_pool/cellbase_maturity.rs index ca8eb37284..68882b0f28 100644 --- a/test/src/specs/tx_pool/cellbase_maturity.rs +++ b/test/src/specs/tx_pool/cellbase_maturity.rs @@ -23,7 +23,7 @@ impl Spec for CellbaseMaturity { (0..MATURITY - DEFAULT_TX_PROPOSAL_WINDOW.0).for_each(|i| { info!("Tx is not maturity in N + {} block", i); assert_send_transaction_fail(node, &tx, "CellbaseImmaturity"); - mine(&node, 1); + mine(node, 1); }); info!( @@ -38,9 +38,9 @@ impl Spec for CellbaseMaturity { "Tx will be added to proposed pool in N + {} block", MATURITY ); - mine(&node, DEFAULT_TX_PROPOSAL_WINDOW.0); + mine(node, DEFAULT_TX_PROPOSAL_WINDOW.0); node.assert_tx_pool_size(0, 1); - mine(&node, 1); + mine(node, 1); node.assert_tx_pool_size(0, 0); } diff --git a/test/src/specs/tx_pool/dead_cell_deps.rs b/test/src/specs/tx_pool/dead_cell_deps.rs index 32257b66b4..9e9a72a9bb 100644 --- a/test/src/specs/tx_pool/dead_cell_deps.rs +++ b/test/src/specs/tx_pool/dead_cell_deps.rs @@ -277,12 +277,12 @@ impl Spec for CellBeingCellDepAndSpentInSameBlockTestGetBlockTemplate { // Inside `mine`, RPC `get_block_template` will be involved, that's our testing interface. 
mine(node0, node0.consensus().tx_proposal_window().farthest()); + + assert!(is_transaction_committed(node0, &tx_b)); if b_weightier_than_c { // B's tx-weight > C's tx-weight - assert!(is_transaction_committed(node0, &tx_b)); } else { // B's tx-weight < C's tx-weight, - assert!(is_transaction_committed(node0, &tx_b)); assert!(is_transaction_committed(node0, &tx_c)); } } diff --git a/test/src/specs/tx_pool/declared_wrong_cycles.rs b/test/src/specs/tx_pool/declared_wrong_cycles.rs index f6ab3156e0..7e3f661987 100644 --- a/test/src/specs/tx_pool/declared_wrong_cycles.rs +++ b/test/src/specs/tx_pool/declared_wrong_cycles.rs @@ -26,7 +26,7 @@ impl Spec for DeclaredWrongCycles { let tx = node0.new_transaction_spend_tip_cellbase(); - relay_tx(&net, &node0, tx, ALWAYS_SUCCESS_SCRIPT_CYCLE + 1); + relay_tx(&net, node0, tx, ALWAYS_SUCCESS_SCRIPT_CYCLE + 1); let result = wait_until(5, || { let tx_pool_info = node0.get_tip_tx_pool_info(); @@ -54,7 +54,7 @@ impl Spec for DeclaredWrongCyclesChunk { let tx = node0.new_transaction_spend_tip_cellbase(); - relay_tx(&net, &node0, tx, ALWAYS_SUCCESS_SCRIPT_CYCLE + 1); + relay_tx(&net, node0, tx, ALWAYS_SUCCESS_SCRIPT_CYCLE + 1); let result = wait_until(5, || { let tx_pool_info = node0.get_tip_tx_pool_info(); @@ -90,7 +90,7 @@ impl Spec for DeclaredWrongCyclesAndRelayAgain { let tx = node0.new_transaction_spend_tip_cellbase(); // relay tx to node0 with wrong cycles net.connect(node0); - relay_tx(&net, &node0, tx.clone(), ALWAYS_SUCCESS_SCRIPT_CYCLE + 1); + relay_tx(&net, node0, tx.clone(), ALWAYS_SUCCESS_SCRIPT_CYCLE + 1); let ret = wait_until(10, || node0.rpc_client().get_peers().is_empty()); assert!( ret, @@ -107,7 +107,7 @@ impl Spec for DeclaredWrongCyclesAndRelayAgain { // relay tx to node1 with correct cycles net.connect(node1); - relay_tx(&net, &node1, tx, ALWAYS_SUCCESS_SCRIPT_CYCLE); + relay_tx(&net, node1, tx, ALWAYS_SUCCESS_SCRIPT_CYCLE); let result = wait_until(5, || { let tx_pool_info = node0.get_tip_tx_pool_info(); diff 
--git a/test/src/specs/tx_pool/depend_tx_in_same_block.rs b/test/src/specs/tx_pool/depend_tx_in_same_block.rs index 70e425b631..70e21e3893 100644 --- a/test/src/specs/tx_pool/depend_tx_in_same_block.rs +++ b/test/src/specs/tx_pool/depend_tx_in_same_block.rs @@ -17,16 +17,16 @@ impl Spec for DepentTxInSameBlock { node0.rpc_client().send_transaction(tx.data().into()); info!("Generated 2 tx should be included in the next block's proposals"); - mine(&node0, 1); + mine(node0, 1); let proposal_block = node0.get_tip_block(); let proposal_ids: Vec<_> = proposal_block.union_proposal_ids_iter().collect(); assert!(proposal_ids.contains(&ProposalShortId::from_tx_hash(&tx_hash_0))); assert!(proposal_ids.contains(&ProposalShortId::from_tx_hash(&tx_hash_1))); - mine(&node0, 1); + mine(node0, 1); info!("Generated 2 tx should be included in the next + 2 block"); - mine(&node0, 1); + mine(node0, 1); let tip_block = node0.get_tip_block(); let commit_txs_hash: Vec<_> = tip_block .transactions() diff --git a/test/src/specs/tx_pool/different_txs_with_same_input.rs b/test/src/specs/tx_pool/different_txs_with_same_input.rs index c941a2fd4e..19d2a08082 100644 --- a/test/src/specs/tx_pool/different_txs_with_same_input.rs +++ b/test/src/specs/tx_pool/different_txs_with_same_input.rs @@ -31,9 +31,9 @@ impl Spec for DifferentTxsWithSameInput { node0.rpc_client().send_transaction(tx1.data().into()); node0.rpc_client().send_transaction(tx2.data().into()); - mine(&node0, 1); - mine(&node0, 1); - mine(&node0, 1); + mine(node0, 1); + mine(node0, 1); + mine(node0, 1); // tx pool statics should reset node0.assert_tx_pool_statics(0, 0); diff --git a/test/src/specs/tx_pool/limit.rs b/test/src/specs/tx_pool/limit.rs index fd94b6e991..70bb704e08 100644 --- a/test/src/specs/tx_pool/limit.rs +++ b/test/src/specs/tx_pool/limit.rs @@ -50,8 +50,8 @@ impl Spec for SizeLimit { assert_send_transaction_fail(node, &tx, "Transaction pool exceeded maximum size limit"); node.assert_tx_pool_serialized_size(max_tx_num 
* one_tx_size); - mine(&node, DEFAULT_TX_PROPOSAL_WINDOW.0); - mine(&node, 1); + mine(node, DEFAULT_TX_PROPOSAL_WINDOW.0); + mine(node, 1); node.assert_tx_pool_serialized_size(0); } @@ -107,8 +107,8 @@ impl Spec for CyclesLimit { assert_send_transaction_fail(node, &tx, "Transaction pool exceeded maximum cycles limit"); node.assert_tx_pool_cycles(max_tx_num * one_tx_cycles); - mine(&node, DEFAULT_TX_PROPOSAL_WINDOW.0); - mine(&node, 1); + mine(node, DEFAULT_TX_PROPOSAL_WINDOW.0); + mine(node, 1); node.assert_tx_pool_cycles(0); } diff --git a/test/src/specs/tx_pool/orphan_tx.rs b/test/src/specs/tx_pool/orphan_tx.rs index 1fd17855be..39a1b8daae 100644 --- a/test/src/specs/tx_pool/orphan_tx.rs +++ b/test/src/specs/tx_pool/orphan_tx.rs @@ -24,7 +24,7 @@ impl Spec for OrphanTxAccepted { let parent_tx = node0.new_transaction_spend_tip_cellbase(); let child_tx = node0.new_transaction(parent_tx.hash()); - relay_tx(&net, &node0, child_tx, ALWAYS_SUCCESS_SCRIPT_CYCLE); + relay_tx(&net, node0, child_tx, ALWAYS_SUCCESS_SCRIPT_CYCLE); let result = wait_until(5, || { let tx_pool_info = node0.get_tip_tx_pool_info(); tx_pool_info.orphan.value() == 1 && tx_pool_info.pending.value() == 0 @@ -34,7 +34,7 @@ impl Spec for OrphanTxAccepted { "Send child tx first, it will be added to orphan tx pool" ); - relay_tx(&net, &node0, parent_tx, ALWAYS_SUCCESS_SCRIPT_CYCLE); + relay_tx(&net, node0, parent_tx, ALWAYS_SUCCESS_SCRIPT_CYCLE); let result = wait_until(5, || { let tx_pool_info = node0.get_tip_tx_pool_info(); tx_pool_info.orphan.value() == 0 && tx_pool_info.pending.value() == 2 @@ -65,7 +65,7 @@ impl Spec for OrphanTxRejected { let parent_tx = node0.new_transaction_spend_tip_cellbase(); let child_tx = node0.new_transaction(parent_tx.hash()); - relay_tx(&net, &node0, child_tx, ALWAYS_SUCCESS_SCRIPT_CYCLE + 1); + relay_tx(&net, node0, child_tx, ALWAYS_SUCCESS_SCRIPT_CYCLE + 1); let result = wait_until(5, || { let tx_pool_info = node0.get_tip_tx_pool_info(); tx_pool_info.orphan.value() == 1 
&& tx_pool_info.pending.value() == 0 @@ -75,7 +75,7 @@ impl Spec for OrphanTxRejected { "Send child tx first, it will be added to orphan tx pool" ); - relay_tx(&net, &node0, parent_tx, ALWAYS_SUCCESS_SCRIPT_CYCLE); + relay_tx(&net, node0, parent_tx, ALWAYS_SUCCESS_SCRIPT_CYCLE); let result = wait_until(5, || { let tx_pool_info = node0.get_tip_tx_pool_info(); tx_pool_info.orphan.value() == 0 && tx_pool_info.pending.value() == 1 diff --git a/test/src/specs/tx_pool/pool_resurrect.rs b/test/src/specs/tx_pool/pool_resurrect.rs index 58e564acb0..e2b9783af3 100644 --- a/test/src/specs/tx_pool/pool_resurrect.rs +++ b/test/src/specs/tx_pool/pool_resurrect.rs @@ -48,7 +48,7 @@ impl Spec for PoolResurrect { node0.assert_tx_pool_size(0, txs_hash.len() as u64); info!("Generate 1 block on node0, 6 txs should be included in this block"); - mine(&node0, 1); + mine(node0, 1); node0.assert_tx_pool_size(0, 0); } } diff --git a/test/src/specs/tx_pool/reference_header_maturity.rs b/test/src/specs/tx_pool/reference_header_maturity.rs index af675d237d..5fb1e9e500 100644 --- a/test/src/specs/tx_pool/reference_header_maturity.rs +++ b/test/src/specs/tx_pool/reference_header_maturity.rs @@ -33,7 +33,7 @@ impl Spec for ReferenceHeaderMaturity { let remained_blocks_in_epoch = tip_epoch.length() - tip_epoch.index(); mine(node, remained_blocks_in_epoch); } else { - mine(&node, 1); + mine(node, 1); } } else { break; @@ -67,7 +67,7 @@ impl Spec for ReferenceHeaderMaturity { let remained_blocks_in_epoch = tip_epoch.length() - tip_epoch.index(); mine(node, remained_blocks_in_epoch); } else { - mine(&node, 1); + mine(node, 1); } } } @@ -78,9 +78,9 @@ impl Spec for ReferenceHeaderMaturity { node.assert_tx_pool_size(1, 0); info!("Tx will be added to proposed pool"); - mine(&node, DEFAULT_TX_PROPOSAL_WINDOW.0); + mine(node, DEFAULT_TX_PROPOSAL_WINDOW.0); node.assert_tx_pool_size(0, 1); - mine(&node, 1); + mine(node, 1); node.assert_tx_pool_size(0, 0); info!("Tx will be eventually accepted on chain"); 
diff --git a/test/src/specs/tx_pool/remove_tx.rs b/test/src/specs/tx_pool/remove_tx.rs index 923dde9676..336a48fefd 100644 --- a/test/src/specs/tx_pool/remove_tx.rs +++ b/test/src/specs/tx_pool/remove_tx.rs @@ -35,7 +35,7 @@ impl Spec for RemoveTx { node0.assert_tx_pool_statics(0, 0); - relay_tx(&net, &node0, child_tx.clone(), ALWAYS_SUCCESS_SCRIPT_CYCLE); + relay_tx(&net, node0, child_tx.clone(), ALWAYS_SUCCESS_SCRIPT_CYCLE); let result = wait_until(5, || { let tx_pool_info = node0.get_tip_tx_pool_info(); tx_pool_info.orphan.value() == 1 && tx_pool_info.pending.value() == 0 @@ -99,7 +99,7 @@ impl Spec for RemoveTx { assert!(check::is_transaction_pending(node0, &tx)); node0.assert_tx_pool_statics(tx_size, tx_cycles); - mine(&node0, 2); + mine(node0, 2); let result = wait_until(5, || { let tx_pool_info = node0.get_tip_tx_pool_info(); tx_pool_info.pending.value() == 0 && tx_pool_info.proposed.value() == 1 diff --git a/test/src/specs/tx_pool/reorg_proposals.rs b/test/src/specs/tx_pool/reorg_proposals.rs index 57e2597c7c..7f00b81768 100644 --- a/test/src/specs/tx_pool/reorg_proposals.rs +++ b/test/src/specs/tx_pool/reorg_proposals.rs @@ -37,10 +37,10 @@ impl Spec for ReorgHandleProposals { // 2. `node_a` proposes `tx_family.a`; `node_b` proposes `tx_family.b` into the // current proposal-window. 
// From now, fork-A and fork-B start to diverge(the common point `X` in the above graph) - node_a.submit_transaction(&family.a()); - node_a.submit_transaction(&family.b()); - node_b.submit_transaction(&family.a()); - node_b.submit_transaction(&family.b()); + node_a.submit_transaction(family.a()); + node_a.submit_transaction(family.b()); + node_b.submit_transaction(family.a()); + node_b.submit_transaction(family.b()); node_a.submit_block(&propose(node_a, &[family.a()])); node_b.submit_block(&propose(node_b, &[family.b()])); (0..window.closest()).for_each(|_| { @@ -79,8 +79,8 @@ impl Spec for ReorgHandleProposals { // fork-A, whose valid proposals are `[tx_family.a]` which be able to be committed. assert_new_block_committed(node_a, &[]); assert_new_block_committed(node_b, &[family.a().clone()]); - mine(&node_a, 1); - mine(&node_b, 1); + mine(node_a, 1); + mine(node_b, 1); } } diff --git a/test/src/specs/tx_pool/send_defected_binary.rs b/test/src/specs/tx_pool/send_defected_binary.rs index 7f97d0879c..270949635b 100644 --- a/test/src/specs/tx_pool/send_defected_binary.rs +++ b/test/src/specs/tx_pool/send_defected_binary.rs @@ -77,9 +77,9 @@ impl Spec for SendDefectedBinary { let witness_len = witness.as_slice().len() as u64; let message = { let mut hasher = new_blake2b(); - hasher.update(&tx_hash.as_slice()); + hasher.update(tx_hash.as_slice()); hasher.update(&witness_len.to_le_bytes()); - hasher.update(&witness.as_slice()); + hasher.update(witness.as_slice()); let mut buf = [0u8; 32]; hasher.finalize(&mut buf); H256::from(buf) diff --git a/test/src/specs/tx_pool/send_large_cycles_tx.rs b/test/src/specs/tx_pool/send_large_cycles_tx.rs index 6dc9b54459..d72bfb7fc8 100644 --- a/test/src/specs/tx_pool/send_large_cycles_tx.rs +++ b/test/src/specs/tx_pool/send_large_cycles_tx.rs @@ -43,15 +43,15 @@ impl Spec for SendLargeCyclesTxInBlock { mine_until_out_bootstrap_period(node1); info!("Generate large cycles tx"); - let tx = build_tx(&node1, &self.random_key.privkey, 
self.random_key.lock_arg()); + let tx = build_tx(node1, &self.random_key.privkey, self.random_key.lock_arg()); info!("Node0 mine large cycles tx"); - node0.connect(&node1); + node0.connect(node1); let result = wait_until(60, || { node1.get_tip_block_number() == node0.get_tip_block_number() }); assert!(result, "node0 can't sync with node1"); - node0.disconnect(&node1); + node0.disconnect(node1); let ret = node0.rpc_client().send_transaction_result(tx.data().into()); ret.expect("package large cycles tx"); let result = wait_until(60, || { @@ -61,10 +61,10 @@ impl Spec for SendLargeCyclesTxInBlock { ret.is_some() && matches!(ret.unwrap().tx_status.status, Status::Pending) }); assert!(result, "large cycles tx rejected by node0"); - mine(&node0, 3); + mine(node0, 3); let block: BlockView = node0.get_tip_block(); assert_eq!(block.transactions()[1], tx); - node0.connect(&node1); + node0.connect(node1); info!("Wait block relay to node1"); let result = wait_until(60, || { @@ -104,10 +104,10 @@ impl Spec for SendLargeCyclesTxToRelay { let node1 = &nodes[1]; mine_until_out_bootstrap_period(node1); - node0.connect(&node1); + node0.connect(node1); info!("Generate large cycles tx"); - let tx = build_tx(&node1, &self.random_key.privkey, self.random_key.lock_arg()); + let tx = build_tx(node1, &self.random_key.privkey, self.random_key.lock_arg()); // send tx let ret = node1.rpc_client().send_transaction_result(tx.data().into()); assert!(ret.is_ok()); @@ -155,7 +155,7 @@ impl Spec for NotifyLargeCyclesTx { mine_until_out_bootstrap_period(node0); info!("Generate large cycles tx"); - let tx = build_tx(&node0, &self.random_key.privkey, self.random_key.lock_arg()); + let tx = build_tx(node0, &self.random_key.privkey, self.random_key.lock_arg()); // send tx let _ = node0.rpc_client().notify_transaction(tx.data().into()); @@ -197,7 +197,7 @@ impl Spec for LoadProgramFailedTx { mine_until_out_bootstrap_period(node0); info!("Generate large cycles tx"); - let tx = build_tx(&node0, 
&self.random_key.privkey, self.random_key.lock_arg()); + let tx = build_tx(node0, &self.random_key.privkey, self.random_key.lock_arg()); // send tx let _ = node0.rpc_client().notify_transaction(tx.data().into()); @@ -240,7 +240,7 @@ impl Spec for RelayWithWrongTx { mine_until_out_bootstrap_period(node0); let rpc_client = node0.rpc_client(); - let tx = build_tx(&node0, &self.random_key.privkey, self.random_key.lock_arg()); + let tx = build_tx(node0, &self.random_key.privkey, self.random_key.lock_arg()); let mut net = Net::new( self.name(), @@ -249,7 +249,7 @@ impl Spec for RelayWithWrongTx { ); net.connect(node0); - relay_tx(&net, &node0, tx, 100_000_000); + relay_tx(&net, node0, tx, 100_000_000); let ret = wait_until(10, || { let peers = rpc_client.get_peers(); peers.is_empty() @@ -261,14 +261,14 @@ impl Spec for RelayWithWrongTx { rpc_client.clear_banned_addresses(); // Advance one block, in order to prevent tx hash is same - mine(&node0, 1); + mine(node0, 1); let mut generator = Generator::new(); - let tx_wrong_pk = build_tx(&node0, &generator.gen_privkey(), self.random_key.lock_arg()); + let tx_wrong_pk = build_tx(node0, &generator.gen_privkey(), self.random_key.lock_arg()); net.connect(node0); - relay_tx(&net, &node0, tx_wrong_pk, 100_000_000); + relay_tx(&net, node0, tx_wrong_pk, 100_000_000); let ret = wait_until(10, || { let peers = rpc_client.get_peers(); peers.is_empty() @@ -340,9 +340,9 @@ fn build_tx(node: &Node, privkey: &Privkey, lock_arg: Bytes) -> TransactionView let witness_len = witness.as_slice().len() as u64; let message = { let mut hasher = new_blake2b(); - hasher.update(&tx_hash.as_bytes()); + hasher.update(tx_hash.as_bytes()); hasher.update(&witness_len.to_le_bytes()); - hasher.update(&witness.as_slice()); + hasher.update(witness.as_slice()); let mut buf = [0u8; 32]; hasher.finalize(&mut buf); H256::from(buf) diff --git a/test/src/specs/tx_pool/send_multisig_secp_tx.rs b/test/src/specs/tx_pool/send_multisig_secp_tx.rs index 
9c666b8c3b..95a98b50f5 100644 --- a/test/src/specs/tx_pool/send_multisig_secp_tx.rs +++ b/test/src/specs/tx_pool/send_multisig_secp_tx.rs @@ -76,9 +76,9 @@ impl Spec for SendMultiSigSecpTxUseDepGroup { let witness_len = witness.as_slice().len() as u64; let message = { let mut hasher = new_blake2b(); - hasher.update(&tx_hash.as_slice()); + hasher.update(tx_hash.as_slice()); hasher.update(&witness_len.to_le_bytes()); - hasher.update(&witness.as_slice()); + hasher.update(witness.as_slice()); let mut buf = [0u8; 32]; hasher.finalize(&mut buf); H256::from(buf) @@ -129,7 +129,7 @@ fn gen_multi_sign_script(keys: &[Privkey], threshold: u8, require_first_n: u8) - .collect::>(); let mut script = vec![0u8, require_first_n, threshold, pubkeys.len() as u8]; pubkeys.iter().for_each(|pubkey| { - script.extend_from_slice(&blake160(&pubkey.serialize()).as_bytes()); + script.extend_from_slice(blake160(&pubkey.serialize()).as_bytes()); }); script.into() } diff --git a/test/src/specs/tx_pool/send_secp_tx.rs b/test/src/specs/tx_pool/send_secp_tx.rs index 7b013282fc..5d5e6fdc17 100644 --- a/test/src/specs/tx_pool/send_secp_tx.rs +++ b/test/src/specs/tx_pool/send_secp_tx.rs @@ -69,9 +69,9 @@ impl Spec for SendSecpTxUseDepGroup { let witness_len = witness.as_slice().len() as u64; let message = { let mut hasher = new_blake2b(); - hasher.update(&tx_hash.as_slice()); + hasher.update(tx_hash.as_slice()); hasher.update(&witness_len.to_le_bytes()); - hasher.update(&witness.as_slice()); + hasher.update(witness.as_slice()); let mut buf = [0u8; 32]; hasher.finalize(&mut buf); H256::from(buf) @@ -186,9 +186,9 @@ impl Spec for CheckTypical2In2OutTx { let witness2_len = witness2.len() as u64; let message = { let mut hasher = new_blake2b(); - hasher.update(&tx_hash.as_bytes()); + hasher.update(tx_hash.as_bytes()); hasher.update(&witness_len.to_le_bytes()); - hasher.update(&witness.as_slice()); + hasher.update(witness.as_slice()); hasher.update(&witness2_len.to_le_bytes()); hasher.update(&witness2); 
let mut buf = [0u8; 32]; diff --git a/test/src/specs/tx_pool/valid_since.rs b/test/src/specs/tx_pool/valid_since.rs index 3fc4dd6db4..c8b991f6c5 100644 --- a/test/src/specs/tx_pool/valid_since.rs +++ b/test/src/specs/tx_pool/valid_since.rs @@ -50,7 +50,7 @@ impl ValidSince { &transaction, "TransactionFailedToVerify: Verification failed Transaction(Immature(", ); - mine(&node, 1); + mine(node, 1); } // Success to send transaction after cellbase immaturity and since immaturity @@ -81,7 +81,7 @@ impl ValidSince { &transaction, "TransactionFailedToVerify: Verification failed Transaction(Immature(", ); - mine(&node, 1); + mine(node, 1); } // Success to send transaction after cellbase immaturity and since immaturity @@ -99,7 +99,7 @@ impl ValidSince { let median_time_block_count = node.consensus().median_time_block_count() as u64; mine_until_out_bootstrap_period(node); let old_median_time: u64 = node.rpc_client().get_blockchain_info().median_time.into(); - mine(&node, 1); + mine(node, 1); let cellbase = node.get_tip_block().transactions()[0].clone(); sleep(Duration::from_secs(2)); @@ -208,7 +208,7 @@ impl ValidSince { &tx, "TransactionFailedToVerify: Verification failed Transaction(Immature(", ); - mine(&node, 1); + mine(node, 1); }); info!( @@ -223,10 +223,10 @@ impl ValidSince { "Tx will be added to proposed pool in N + {} block", relative_blocks ); - mine(&node, DEFAULT_TX_PROPOSAL_WINDOW.0); + mine(node, DEFAULT_TX_PROPOSAL_WINDOW.0); node.assert_tx_pool_size(0, 1); - mine(&node, 1); + mine(node, 1); node.assert_tx_pool_size(0, 0); // test absolute block number since @@ -243,7 +243,7 @@ impl ValidSince { (tip_number..absolute_block - DEFAULT_TX_PROPOSAL_WINDOW.0).for_each(|i| { info!("Tx is Immature in block {}", i); assert_send_transaction_fail(node, &tx, "Not mature cause of since condition"); - mine(&node, 1); + mine(node, 1); }); info!( @@ -258,9 +258,9 @@ impl ValidSince { "Tx will be added to proposed pool in {} block", absolute_block ); - mine(&node, 
DEFAULT_TX_PROPOSAL_WINDOW.0); + mine(node, DEFAULT_TX_PROPOSAL_WINDOW.0); node.assert_tx_pool_size(0, 1); - mine(&node, 1); + mine(node, 1); node.assert_tx_pool_size(0, 0); } @@ -273,15 +273,15 @@ impl ValidSince { node.assert_tx_pool_size(1, 0); assert!(check::is_transaction_pending(node, transaction)); // Gap - mine(&node, 1); + mine(node, 1); node.assert_tx_pool_size(1, 0); assert!(check::is_transaction_pending(node, transaction)); // Proposed - mine(&node, 1); + mine(node, 1); node.assert_tx_pool_size(0, 1); assert!(check::is_transaction_proposed(node, transaction)); // Committed - mine(&node, 1); + mine(node, 1); node.assert_tx_pool_size(0, 0); assert!(check::is_transaction_committed(node, transaction)); diff --git a/test/src/util/transaction.rs b/test/src/util/transaction.rs index d3d4843837..599b769bca 100644 --- a/test/src/util/transaction.rs +++ b/test/src/util/transaction.rs @@ -57,7 +57,7 @@ pub fn relay_tx(net: &Net, node: &Node, tx: TransactionView, cycles: u64) { net.send(node, SupportProtocols::Relay, tx_hashes_msg.as_bytes()); let ret = net.should_receive(node, |data: &Bytes| { - packed::RelayMessage::from_slice(&data) + packed::RelayMessage::from_slice(data) .map(|message| message.to_enum().item_name() == packed::GetRelayTransactions::NAME) .unwrap_or(false) }); diff --git a/traits/Cargo.toml b/traits/Cargo.toml index 789aaae4c4..67f43363de 100644 --- a/traits/Cargo.toml +++ b/traits/Cargo.toml @@ -2,7 +2,7 @@ name = "ckb-traits" version = "0.102.0-pre" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" license = "MIT" description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" diff --git a/tx-pool/Cargo.toml b/tx-pool/Cargo.toml index 561a022a89..1f4b8a8514 100644 --- a/tx-pool/Cargo.toml +++ b/tx-pool/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-tx-pool" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "The CKB tx-pool" 
homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/tx-pool/src/block_assembler/tests.rs b/tx-pool/src/block_assembler/tests.rs index f00fc3c44c..dc7c80e13c 100644 --- a/tx-pool/src/block_assembler/tests.rs +++ b/tx-pool/src/block_assembler/tests.rs @@ -17,7 +17,7 @@ fn test_candidate_uncles_basic() { assert!(!candidate_uncles.insert(block.clone())); assert_eq!(candidate_uncles.len(), 1); - assert!(candidate_uncles.remove_by_number(&block)); + assert!(candidate_uncles.remove_by_number(block)); assert_eq!(candidate_uncles.len(), 0); assert_eq!(candidate_uncles.map.len(), 0); } diff --git a/tx-pool/src/chunk_process.rs b/tx-pool/src/chunk_process.rs index 9b0346ce94..de5c9d3b56 100644 --- a/tx-pool/src/chunk_process.rs +++ b/tx-pool/src/chunk_process.rs @@ -14,7 +14,6 @@ use ckb_verification::{ ContextualWithoutScriptTransactionVerifier, ScriptError, ScriptVerifier, ScriptVerifyResult, ScriptVerifyState, TimeRelativeTransactionVerifier, TxVerifyEnv, }; -use std::convert::TryInto; use std::sync::Arc; const MIN_STEP_CYCLE: Cycle = 10_000_000; diff --git a/tx-pool/src/component/commit_txs_scanner.rs b/tx-pool/src/component/commit_txs_scanner.rs index 560d4e0591..162e57cea2 100644 --- a/tx-pool/src/component/commit_txs_scanner.rs +++ b/tx-pool/src/component/commit_txs_scanner.rs @@ -194,27 +194,27 @@ impl<'a> CommitTxsScanner<'a> { // stale due to ancestor inclusion in the block) // Also skip transactions that we've already failed to add. fn skip_proposed_entry(&self, short_id: &ProposalShortId) -> bool { - self.fetched_txs.contains(&short_id) - || self.modified_entries.contains_key(&short_id) - || self.failed_txs.contains(&short_id) + self.fetched_txs.contains(short_id) + || self.modified_entries.contains_key(short_id) + || self.failed_txs.contains(short_id) } /// Add descendants of given transactions to `modified_entries` with ancestor /// state updated assuming given transactions are inBlock. 
fn update_modified_entries(&mut self, already_added: &LinkedHashMap) { for (id, entry) in already_added { - let descendants = self.proposed_pool.calc_descendants(&id); + let descendants = self.proposed_pool.calc_descendants(id); for desc_id in descendants .iter() .filter(|id| !already_added.contains_key(id)) { - let mut desc = self.modified_entries.remove(&desc_id).unwrap_or_else(|| { + let mut desc = self.modified_entries.remove(desc_id).unwrap_or_else(|| { self.proposed_pool - .get(&desc_id) + .get(desc_id) .map(ToOwned::to_owned) .expect("pool consistent") }); - desc.sub_entry_weight(&entry); + desc.sub_entry_weight(entry); self.modified_entries.insert(desc); } } diff --git a/tx-pool/src/component/container.rs b/tx-pool/src/component/container.rs index d03e4b3377..7924c7d83e 100644 --- a/tx-pool/src/component/container.rs +++ b/tx-pool/src/component/container.rs @@ -136,7 +136,7 @@ impl TxLinksMap { .cloned() .unwrap_or_default(); - calc_relation_ids(Cow::Owned(direct), &self, relation) + calc_relation_ids(Cow::Owned(direct), self, relation) } pub fn calc_ancestors(&self, short_id: &ProposalShortId) -> HashSet { @@ -238,7 +238,7 @@ impl SortedTxMap { } let parent_hash = &input_pt.tx_hash(); - let id = ProposalShortId::from_tx_hash(&parent_hash); + let id = ProposalShortId::from_tx_hash(parent_hash); if self.links.inner.contains_key(&id) { parents.insert(id); } @@ -262,7 +262,7 @@ impl SortedTxMap { // update parents references for ancestor_id in &ancestors { let ancestor = self.entries.get(ancestor_id).expect("pool consistent"); - entry.add_entry_weight(&ancestor); + entry.add_entry_weight(ancestor); } if entry.ancestors_count > self.max_ancestors_count { @@ -270,7 +270,7 @@ impl SortedTxMap { } for parent in &parents { - self.links.add_child(&parent, short_id.clone()); + self.links.add_child(parent, short_id.clone()); } // insert links @@ -321,7 +321,7 @@ impl SortedTxMap { } fn remove_unchecked(&mut self, id: &ProposalShortId) -> Option { - 
self.entries.remove(&id).map(|entry| { + self.entries.remove(id).map(|entry| { self.sorted_index.remove(&entry.as_sorted_key()); self.update_deps_for_remove(&entry); entry @@ -358,7 +358,7 @@ impl SortedTxMap { // We're not recursively removing a tx and all its descendants // So we need update statistics state for desc_id in &descendants { - if let Some(desc_entry) = self.entries.get_mut(&desc_id) { + if let Some(desc_entry) = self.entries.get_mut(desc_id) { let deleted = self.sorted_index.remove(&desc_entry.as_sorted_key()); debug_assert!(deleted, "pool inconsistent"); desc_entry.sub_entry_weight(&entry); diff --git a/tx-pool/src/component/pending.rs b/tx-pool/src/component/pending.rs index 5558922174..aa9aff1c31 100644 --- a/tx-pool/src/component/pending.rs +++ b/tx-pool/src/component/pending.rs @@ -215,7 +215,7 @@ impl PendingQueue { } } for entry in &removed { - self.remove_entry_relation(&entry); + self.remove_entry_relation(entry); } removed @@ -236,7 +236,7 @@ impl PendingQueue { if proposals.len() == limit { break; } - if !exclusion.contains(&id) { + if !exclusion.contains(id) { proposals.insert(id.clone()); } } diff --git a/tx-pool/src/component/proposed.rs b/tx-pool/src/component/proposed.rs index a1973ab145..3f484b9447 100644 --- a/tx-pool/src/component/proposed.rs +++ b/tx-pool/src/component/proposed.rs @@ -195,7 +195,7 @@ impl ProposedPool { } for d in entry.related_dep_out_points() { - self.edges.delete_txid_by_dep(d, &id); + self.edges.delete_txid_by_dep(d, id); } for o in outputs { @@ -233,7 +233,7 @@ impl ProposedPool { } for d in related_out_points { - self.edges.delete_txid_by_dep(&d, &id); + self.edges.delete_txid_by_dep(d, &id); } self.edges.header_deps.remove(&id); @@ -350,12 +350,12 @@ impl ProposedPool { /// find all ancestors from pool pub fn calc_ancestors(&self, tx_short_id: &ProposalShortId) -> HashSet { - self.inner.calc_ancestors(&tx_short_id) + self.inner.calc_ancestors(tx_short_id) } /// find all descendants from pool pub fn 
calc_descendants(&self, tx_short_id: &ProposalShortId) -> HashSet { - self.inner.calc_descendants(&tx_short_id) + self.inner.calc_descendants(tx_short_id) } pub(crate) fn clear(&mut self) { diff --git a/tx-pool/src/component/recent_reject.rs b/tx-pool/src/component/recent_reject.rs index da0d68ed8f..08dbc3eb3a 100644 --- a/tx-pool/src/component/recent_reject.rs +++ b/tx-pool/src/component/recent_reject.rs @@ -38,7 +38,7 @@ impl RecentReject { let db = DBWithTTL::open_cf(path, cf_names.clone(), ttl)?; let estimate_keys_num = cf_names .iter() - .map(|cf| db.estimate_num_keys_cf(&cf)) + .map(|cf| db.estimate_num_keys_cf(cf)) .collect::, _>>()?; let total_keys_num = estimate_keys_num.iter().map(|num| num.unwrap_or(0)).sum(); diff --git a/tx-pool/src/component/tests/pending.rs b/tx-pool/src/component/tests/pending.rs index 32ffaa5990..689302014b 100644 --- a/tx-pool/src/component/tests/pending.rs +++ b/tx-pool/src/component/tests/pending.rs @@ -4,7 +4,6 @@ use crate::component::tests::util::{ use crate::component::{entry::TxEntry, pending::PendingQueue}; use ckb_types::{h256, packed::Byte32, prelude::*}; use std::collections::HashSet; -use std::iter::FromIterator; #[test] fn test_basic() { diff --git a/tx-pool/src/component/tests/proposed.rs b/tx-pool/src/component/tests/proposed.rs index 9cee0449ba..ad573bc061 100644 --- a/tx-pool/src/component/tests/proposed.rs +++ b/tx-pool/src/component/tests/proposed.rs @@ -14,7 +14,6 @@ use ckb_types::{ prelude::*, }; use std::collections::HashSet; -use std::iter::FromIterator; fn dummy_resolve Option>( tx: TransactionView, diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 0a0ed863ea..99cf36b865 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -43,7 +43,6 @@ use ckb_verification::{ use faketime::unix_time_as_millis; use std::collections::HashSet; use std::collections::{HashMap, VecDeque}; -use std::convert::TryInto; use std::sync::atomic::Ordering; use std::sync::{atomic::AtomicU64, Arc}; use 
std::time::Duration; @@ -521,7 +520,7 @@ impl TxPoolService { ) -> (Result<(), Reject>, Arc) { let (ret, snapshot) = self .with_tx_pool_write_lock(move |tx_pool, snapshot| { - check_tx_cycle_limit(&tx_pool, verified.cycles)?; + check_tx_cycle_limit(tx_pool, verified.cycles)?; // if snapshot changed by context switch // we need redo time_relative verify @@ -535,11 +534,11 @@ impl TxPoolService { ); // destructuring assignments are not currently supported - status = check_rtx(&tx_pool, &snapshot, &entry.rtx)?; + status = check_rtx(tx_pool, snapshot, &entry.rtx)?; let tip_header = snapshot.tip_header(); let tx_env = status.with_env(tip_header); - time_relative_verify(&snapshot, &entry.rtx, &tx_env)?; + time_relative_verify(snapshot, &entry.rtx, &tx_env)?; } _submit_entry(tx_pool, status, entry.clone(), &self.callbacks)?; @@ -593,13 +592,13 @@ impl TxPoolService { let (ret, snapshot) = self .with_tx_pool_read_lock(|tx_pool, snapshot| { let tip_hash = snapshot.tip_hash(); - check_tx_size_limit(&tx_pool, tx_size)?; + check_tx_size_limit(tx_pool, tx_size)?; - check_txid_collision(&tx_pool, &tx)?; + check_txid_collision(tx_pool, tx)?; - let (rtx, status) = resolve_tx(&tx_pool, &snapshot, tx.clone())?; + let (rtx, status) = resolve_tx(tx_pool, snapshot, tx.clone())?; - let fee = check_tx_fee(&tx_pool, &snapshot, &rtx, tx_size)?; + let fee = check_tx_fee(tx_pool, snapshot, &rtx, tx_size)?; Ok((tip_hash, rtx, status, fee, tx_size)) }) @@ -745,14 +744,14 @@ impl TxPoolService { } Err(reject) => { debug!("after_process {} reject: {} ", tx_hash, reject); - if is_missing_input(&reject) && all_inputs_is_unknown(snapshot, &tx) { + if is_missing_input(reject) && all_inputs_is_unknown(snapshot, &tx) { self.add_orphan(tx, peer, declared_cycle).await; } else { if reject.is_malformed_tx() { self.ban_malformed(peer, format!("reject {}", reject)); } if matches!(reject, Reject::Resolve(..) 
| Reject::Verification(..)) { - self.put_recent_reject(&tx_hash, &reject).await; + self.put_recent_reject(&tx_hash, reject).await; } self.send_result_to_relayer(TxVerificationResult::Reject { tx_hash }); } @@ -778,7 +777,7 @@ impl TxPoolService { } Err(reject) => { if matches!(reject, Reject::Resolve(..) | Reject::Verification(..)) { - self.put_recent_reject(&tx_hash, &reject).await; + self.put_recent_reject(&tx_hash, reject).await; } } } @@ -1090,9 +1089,9 @@ impl TxPoolService { let mut attached = LinkedHashSet::default(); let hardfork_switch = snapshot.consensus().hardfork_switch(); let hardfork_during_detach = - check_if_hardfork_during_blocks(&hardfork_switch, &detached_blocks); + check_if_hardfork_during_blocks(hardfork_switch, &detached_blocks); let hardfork_during_attach = - check_if_hardfork_during_blocks(&hardfork_switch, &attached_blocks); + check_if_hardfork_during_blocks(hardfork_switch, &attached_blocks); let new_tip_after_delay = after_delay_window(&snapshot); let epoch_of_next_block = snapshot diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index e3c5004b8d..01c33cd8e0 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -59,7 +59,7 @@ pub(crate) struct Notify { } impl Notify { - pub(crate) fn notify(arguments: A) -> Notify { + pub(crate) fn new(arguments: A) -> Notify { Notify { arguments } } } @@ -202,7 +202,7 @@ impl TxPoolController { /// Notify new uncle pub fn notify_new_uncle(&self, uncle: UncleBlockView) -> Result<(), AnyError> { - let notify = Notify::notify(uncle); + let notify = Notify::new(uncle); self.sender .try_send(Message::NewUncle(notify)) .map_err(|e| { @@ -222,7 +222,7 @@ impl TxPoolController { detached_proposal_id: HashSet, snapshot: Arc, ) -> Result<(), AnyError> { - let notify = Notify::notify(( + let notify = Notify::new(( detached_blocks, attached_blocks, detached_proposal_id, @@ -301,7 +301,7 @@ impl TxPoolController { /// Receive txs from network, try to add txs to tx-pool pub fn 
notify_txs(&self, txs: Vec) -> Result<(), AnyError> { - let notify = Notify::notify(txs); + let notify = Notify::new(txs); self.sender .try_send(Message::NotifyTxs(notify)) .map_err(|e| { @@ -823,7 +823,7 @@ async fn process(mut service: TxPoolService, message: Message) { arguments: mut proposals, }) => { let tx_pool = service.tx_pool.read().await; - proposals.retain(|id| !tx_pool.contains_proposal_id(&id)); + proposals.retain(|id| !tx_pool.contains_proposal_id(id)); if let Err(e) = responder.send(proposals) { error!("responder send fresh_proposals_filter failed {:?}", e); }; @@ -916,11 +916,9 @@ async fn process(mut service: TxPoolService, message: Message) { let txs = short_ids .into_iter() .filter_map(|short_id| { - if let Some(tx) = tx_pool.get_tx_from_pool_or_store(&short_id) { - Some((short_id, tx)) - } else { - None - } + tx_pool + .get_tx_from_pool_or_store(&short_id) + .map(|tx| (short_id, tx)) }) .collect(); if let Err(e) = responder.send(txs) { diff --git a/tx-pool/src/util.rs b/tx-pool/src/util.rs index fa7da19375..748a5ae45c 100644 --- a/tx-pool/src/util.rs +++ b/tx-pool/src/util.rs @@ -44,7 +44,7 @@ pub(crate) fn check_tx_fee( tx_size: usize, ) -> Result { let fee = DaoCalculator::new(snapshot.consensus(), &snapshot.as_data_provider()) - .transaction_fee(&rtx) + .transaction_fee(rtx) .map_err(|err| Reject::Malformed(format!("Transaction fee calculate overflow: {}", err)))?; let min_fee = tx_pool.config.min_fee_rate.fee(tx_size); // reject txs which fee lower than min fee rate @@ -84,13 +84,13 @@ pub(crate) fn verify_rtx( if let Some(ref cached) = cache_entry { match cached { CacheEntry::Completed(completed) => { - TimeRelativeTransactionVerifier::new(&rtx, consensus, snapshot, tx_env) + TimeRelativeTransactionVerifier::new(rtx, consensus, snapshot, tx_env) .verify() .map(|_| *completed) .map_err(Reject::Verification) } CacheEntry::Suspended(suspended) => ContextualTransactionVerifier::new( - &rtx, + rtx, consensus, &snapshot.as_data_provider(), 
tx_env, @@ -100,14 +100,9 @@ pub(crate) fn verify_rtx( } } else { block_in_place(|| { - ContextualTransactionVerifier::new( - &rtx, - consensus, - &snapshot.as_data_provider(), - tx_env, - ) - .verify(max_tx_verify_cycles, false) - .map_err(Reject::Verification) + ContextualTransactionVerifier::new(rtx, consensus, &snapshot.as_data_provider(), tx_env) + .verify(max_tx_verify_cycles, false) + .map_err(Reject::Verification) }) } } diff --git a/util/Cargo.toml b/util/Cargo.toml index e4ee91ddd3..ff6d842a13 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-util" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "CKB utilities library." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/app-config/Cargo.toml b/util/app-config/Cargo.toml index 959d37848e..de4e675a12 100644 --- a/util/app-config/Cargo.toml +++ b/util/app-config/Cargo.toml @@ -2,7 +2,7 @@ name = "ckb-app-config" version = "0.102.0-pre" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" license = "MIT" description = "CKB command line arguments and config options." homepage = "https://github.com/nervosnetwork/ckb" diff --git a/util/app-config/src/app_config.rs b/util/app-config/src/app_config.rs index 7a8047d5ea..a8f25c9e0d 100644 --- a/util/app-config/src/app_config.rs +++ b/util/app-config/src/app_config.rs @@ -256,7 +256,7 @@ impl AppConfig { impl CKBAppConfig { /// Load a new instance from a file pub fn load_from_slice(slice: &[u8]) -> Result { - let legacy_config: legacy::CKBAppConfig = toml::from_slice(&slice)?; + let legacy_config: legacy::CKBAppConfig = toml::from_slice(slice)?; for field in legacy_config.deprecated_fields() { eprintln!( "WARN: the option \"{}\" in configuration files is deprecated since v{}.", @@ -309,7 +309,7 @@ impl CKBAppConfig { impl MinerAppConfig { /// Load a new instance from a file. 
pub fn load_from_slice(slice: &[u8]) -> Result { - let legacy_config: legacy::MinerAppConfig = toml::from_slice(&slice)?; + let legacy_config: legacy::MinerAppConfig = toml::from_slice(slice)?; for field in legacy_config.deprecated_fields() { eprintln!( "WARN: the option \"{}\" in configuration files is deprecated since v{}.", diff --git a/util/app-config/src/configs/network.rs b/util/app-config/src/configs/network.rs index 002514d8f0..cb14a226ea 100644 --- a/util/app-config/src/configs/network.rs +++ b/util/app-config/src/configs/network.rs @@ -165,7 +165,7 @@ pub(crate) fn write_secret_to_file(secret: &[u8], path: PathBuf) -> Result<(), E .write(true) .open(path) .and_then(|mut file| { - file.write_all(&secret)?; + file.write_all(secret)?; #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; diff --git a/util/app-config/src/lib.rs b/util/app-config/src/lib.rs index ac47f07704..31a7449503 100644 --- a/util/app-config/src/lib.rs +++ b/util/app-config/src/lib.rs @@ -60,7 +60,7 @@ impl Setup { let mut config = AppConfig::load_for_subcommand(&root_dir, subcommand_name)?; config.set_bin_name(bin_name); #[cfg(feature = "with_sentry")] - let is_sentry_enabled = is_daemon(&subcommand_name) && config.sentry().is_enabled(); + let is_sentry_enabled = is_daemon(subcommand_name) && config.sentry().is_enabled(); Ok(Setup { subcommand_name: subcommand_name.to_string(), diff --git a/util/app-config/src/sentry_config.rs b/util/app-config/src/sentry_config.rs index f76fb5f759..e8937b14a2 100644 --- a/util/app-config/src/sentry_config.rs +++ b/util/app-config/src/sentry_config.rs @@ -18,7 +18,7 @@ pub struct SentryConfig { impl SentryConfig { pub fn init(&self, version: &Version) -> ClientInitGuard { - let guard = init(self.build_sentry_client_options(&version)); + let guard = init(self.build_sentry_client_options(version)); if guard.is_enabled() { configure_scope(|scope| { scope.set_tag("release.pre", version.is_pre()); diff --git a/util/app-config/src/tests/app_config.rs 
b/util/app-config/src/tests/app_config.rs index 2839e41f56..e263057cc2 100644 --- a/util/app-config/src/tests/app_config.rs +++ b/util/app-config/src/tests/app_config.rs @@ -96,8 +96,8 @@ fn test_log_to_stdout_only() { let ckb_config = app_config .into_ckb() .unwrap_or_else(|err| std::panic::panic_any(err)); - assert_eq!(ckb_config.logger.log_to_file, false); - assert_eq!(ckb_config.logger.log_to_stdout, true); + assert!(!ckb_config.logger.log_to_file); + assert!(ckb_config.logger.log_to_stdout); } { Resource::bundled_miner_config() @@ -108,8 +108,8 @@ fn test_log_to_stdout_only() { let miner_config = app_config .into_miner() .unwrap_or_else(|err| std::panic::panic_any(err)); - assert_eq!(miner_config.logger.log_to_file, false); - assert_eq!(miner_config.logger.log_to_stdout, true); + assert!(!miner_config.logger.log_to_file); + assert!(miner_config.logger.log_to_stdout); } } diff --git a/util/build-info/Cargo.toml b/util/build-info/Cargo.toml index 9fb055dc65..2b2b4ecfa5 100644 --- a/util/build-info/Cargo.toml +++ b/util/build-info/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-build-info" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "CKB cargo build information." 
homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/chain-iter/Cargo.toml b/util/chain-iter/Cargo.toml index b312f24f08..88aef35b0b 100644 --- a/util/chain-iter/Cargo.toml +++ b/util/chain-iter/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-chain-iter" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "TODO(doc): @quake crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/channel/Cargo.toml b/util/channel/Cargo.toml index 9f19affbd2..733f6debd3 100644 --- a/util/channel/Cargo.toml +++ b/util/channel/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-channel" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "Channel wrapper." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/constant/Cargo.toml b/util/constant/Cargo.toml index c2add49137..37be26a849 100644 --- a/util/constant/Cargo.toml +++ b/util/constant/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-constant" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "Ckb constant container" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/crypto/Cargo.toml b/util/crypto/Cargo.toml index cc0068c36f..164484e35e 100644 --- a/util/crypto/Cargo.toml +++ b/util/crypto/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-crypto" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "The ckb crypto util" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/dao/Cargo.toml b/util/dao/Cargo.toml index 7e7cd329a7..7fb02f4951 100644 --- 
a/util/dao/Cargo.toml +++ b/util/dao/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-dao" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "This crate provides implementation to calculate dao field" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/dao/src/lib.rs b/util/dao/src/lib.rs index 875fe380c0..ea5f397921 100644 --- a/util/dao/src/lib.rs +++ b/util/dao/src/lib.rs @@ -14,7 +14,6 @@ use ckb_types::{ prelude::*, }; use std::collections::HashSet; -use std::convert::TryFrom; #[cfg(test)] mod tests; @@ -101,7 +100,7 @@ impl<'a, DL: CellDataProvider + EpochProvider + HeaderProvider> DaoCalculator<'a // in the cellbase of current block. let current_block_epoch = self .consensus - .next_epoch_ext(&parent, self.data_loader) + .next_epoch_ext(parent, self.data_loader) .ok_or(DaoError::InvalidHeader)? .epoch(); let current_block_number = parent.number() + 1; @@ -168,7 +167,7 @@ impl<'a, DL: CellDataProvider + EpochProvider + HeaderProvider> DaoCalculator<'a rtx.resolved_inputs .iter() .try_fold(Capacity::zero(), |capacities, cell_meta| { - let current_capacity = modified_occupied_capacity(&cell_meta, &self.consensus); + let current_capacity = modified_occupied_capacity(cell_meta, self.consensus); current_capacity.and_then(|c| capacities.safe_add(c)) }) .map_err(Into::into) @@ -211,7 +210,7 @@ impl<'a, DL: CellDataProvider + EpochProvider + HeaderProvider> DaoCalculator<'a == self.consensus.dao_type_hash().expect("No dao system cell") }; let is_withdrawing_input = - |cell_meta: &CellMeta| match self.data_loader.load_cell_data(&cell_meta) { + |cell_meta: &CellMeta| match self.data_loader.load_cell_data(cell_meta) { Some(data) => data.len() == 8 && LittleEndian::read_u64(&data) > 0, None => false, }; @@ -220,13 +219,13 @@ impl<'a, DL: CellDataProvider + EpochProvider + HeaderProvider> DaoCalculator<'a .to_opt() .map(is_dao_type_script) 
.unwrap_or(false) - && is_withdrawing_input(&cell_meta) + && is_withdrawing_input(cell_meta) { let withdrawing_header_hash = cell_meta .transaction_info .as_ref() .map(|info| &info.block_hash) - .filter(|hash| header_deps.contains(&hash)) + .filter(|hash| header_deps.contains(hash)) .ok_or(DaoError::InvalidOutPoint)?; let deposit_header_hash = rtx .transaction @@ -259,10 +258,10 @@ impl<'a, DL: CellDataProvider + EpochProvider + HeaderProvider> DaoCalculator<'a .ok_or(DaoError::InvalidOutPoint) })?; self.calculate_maximum_withdraw( - &output, + output, Capacity::bytes(cell_meta.data_bytes as usize)?, - &deposit_header_hash, - &withdrawing_header_hash, + deposit_header_hash, + withdrawing_header_hash, ) } else { Ok(output.capacity().unpack()) diff --git a/util/dao/utils/Cargo.toml b/util/dao/utils/Cargo.toml index 72cd7bc722..ea780b522e 100644 --- a/util/dao/utils/Cargo.toml +++ b/util/dao/utils/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-dao-utils" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "This crate provides several util functions to operate the dao field and NervosDAO related errors" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/fixed-hash/Cargo.toml b/util/fixed-hash/Cargo.toml index cc3b6578d2..f56d98701a 100644 --- a/util/fixed-hash/Cargo.toml +++ b/util/fixed-hash/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-fixed-hash" version = "0.102.0-pre" license = "MIT" authors = ["Nervos "] -edition = "2018" +edition = "2021" description = "Provide several simple fixed-sized hash data type and their static constructors." 
homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/fixed-hash/core/Cargo.toml b/util/fixed-hash/core/Cargo.toml index ecb42dd8ab..e8ff14491a 100644 --- a/util/fixed-hash/core/Cargo.toml +++ b/util/fixed-hash/core/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-fixed-hash-core" version = "0.102.0-pre" license = "MIT" authors = ["Nervos "] -edition = "2018" +edition = "2021" description = "Provide several fixed-length binary data, aka fixed-sized hashes." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/fixed-hash/macros/Cargo.toml b/util/fixed-hash/macros/Cargo.toml index 43c7c0d0c4..3746a020ea 100644 --- a/util/fixed-hash/macros/Cargo.toml +++ b/util/fixed-hash/macros/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-fixed-hash-macros" version = "0.102.0-pre" license = "MIT" authors = ["Nervos "] -edition = "2018" +edition = "2021" description = "Provide several proc-macros to construct const fixed-sized hashes." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/hash/Cargo.toml b/util/hash/Cargo.toml index 490fa4f0f3..633feeba9e 100644 --- a/util/hash/Cargo.toml +++ b/util/hash/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-hash" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "CKB default hash function." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/instrument/Cargo.toml b/util/instrument/Cargo.toml index 254e19a603..70047b54bc 100644 --- a/util/instrument/Cargo.toml +++ b/util/instrument/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-instrument" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "CKB tool to import/export chain data." 
homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/jsonrpc-types/Cargo.toml b/util/jsonrpc-types/Cargo.toml index f7338604a3..31f5d9c7d2 100644 --- a/util/jsonrpc-types/Cargo.toml +++ b/util/jsonrpc-types/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-jsonrpc-types" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "CKB common types for JSON serialization." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/jsonrpc-types/src/blockchain.rs b/util/jsonrpc-types/src/blockchain.rs index ef984e4eaf..ac1b9dce50 100644 --- a/util/jsonrpc-types/src/blockchain.rs +++ b/util/jsonrpc-types/src/blockchain.rs @@ -5,7 +5,6 @@ use crate::{ }; use ckb_types::{core, packed, prelude::*, H256}; use serde::{Deserialize, Serialize}; -use std::convert::TryFrom; use std::fmt; /// Specifies how the script `code_hash` is used to match the script code and how to run the code. 
diff --git a/util/jsonrpc-types/src/bytes.rs b/util/jsonrpc-types/src/bytes.rs index 77cbc8d31d..8034a19b05 100644 --- a/util/jsonrpc-types/src/bytes.rs +++ b/util/jsonrpc-types/src/bytes.rs @@ -107,7 +107,7 @@ impl serde::Serialize for JsonBytes { let mut buffer = vec![0u8; self.len() * 2 + 2]; buffer[0] = b'0'; buffer[1] = b'x'; - hex_encode(&self.as_bytes(), &mut buffer[2..]) + hex_encode(self.as_bytes(), &mut buffer[2..]) .map_err(|e| serde::ser::Error::custom(&format!("{}", e)))?; serializer.serialize_str(unsafe { ::std::str::from_utf8_unchecked(&buffer) }) } diff --git a/util/jsonrpc-types/src/pool.rs b/util/jsonrpc-types/src/pool.rs index 3f0c0f4c07..9fa52ea892 100644 --- a/util/jsonrpc-types/src/pool.rs +++ b/util/jsonrpc-types/src/pool.rs @@ -75,7 +75,7 @@ impl From for PoolTransactionEntry { pub enum OutputsValidator { /// "passthrough": the default validator, bypass output checking, thus allow any kind of transaction outputs. Passthrough, - /// "well_known_scripts_only": restricts the lock script and type script usage, see more information on https://github.com/nervosnetwork/ckb/wiki/Transaction-%C2%BB-Default-Outputs-Validator + /// "well_known_scripts_only": restricts the lock script and type script usage, see more information on WellKnownScriptsOnly, } diff --git a/util/jsonrpc-types/src/tests/blockchain.rs b/util/jsonrpc-types/src/tests/blockchain.rs index 3ca3866eab..4a17b1f1f2 100644 --- a/util/jsonrpc-types/src/tests/blockchain.rs +++ b/util/jsonrpc-types/src/tests/blockchain.rs @@ -95,7 +95,7 @@ fn test_script_serialization() { }, ), ] { - let decoded: Script = serde_json::from_str(&original).unwrap(); + let decoded: Script = serde_json::from_str(original).unwrap(); assert_eq!(&decoded, entity); let encoded = serde_json::to_string(&decoded).unwrap(); assert_eq!(&encoded, original); @@ -132,7 +132,7 @@ fn test_script_serialization() { \"args\":\"0x\"\ }", ] { - let result: Result = serde_json::from_str(&malformed); + let result: Result = 
serde_json::from_str(malformed); assert!( result.is_err(), "should reject malformed json: [{}]", diff --git a/util/launcher/Cargo.toml b/util/launcher/Cargo.toml index 2a8a83a5e8..58f44fc67b 100644 --- a/util/launcher/Cargo.toml +++ b/util/launcher/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-launcher" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "CKB tool to import/export chain data." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/launcher/migration-template/Cargo.toml b/util/launcher/migration-template/Cargo.toml index 7c4b49dcdf..9a7b67cdc3 100644 --- a/util/launcher/migration-template/Cargo.toml +++ b/util/launcher/migration-template/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-migration-template" version = "0.102.0-pre" license = "MIT" authors = ["Nervos "] -edition = "2018" +edition = "2021" description = "Provide proc-macros to setup migration." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/launcher/src/migrate.rs b/util/launcher/src/migrate.rs index 6b7afeda3a..42de1c0187 100644 --- a/util/launcher/src/migrate.rs +++ b/util/launcher/src/migrate.rs @@ -48,12 +48,12 @@ impl Migrate { /// - Greater: The database version is greater than the matched version of the executable binary. /// Requires upgrade the executable binary. pub fn check(&self, db: &ReadOnlyDB) -> Ordering { - self.migrations.check(&db) + self.migrations.check(db) } /// Check whether database requires expensive migrations. pub fn require_expensive(&self, db: &ReadOnlyDB) -> bool { - self.migrations.expensive(&db) + self.migrations.expensive(db) } /// Open bulk load db. 
diff --git a/util/launcher/src/shared_builder.rs b/util/launcher/src/shared_builder.rs index 6e884f232a..c473ed380f 100644 --- a/util/launcher/src/shared_builder.rs +++ b/util/launcher/src/shared_builder.rs @@ -270,7 +270,7 @@ impl SharedBuilder { .into()) } } - None => store.init(&consensus).map(|_| { + None => store.init(consensus).map(|_| { ( consensus.genesis_block().header(), consensus.genesis_epoch_ext().to_owned(), @@ -283,12 +283,12 @@ impl SharedBuilder { store: &ChainDB, consensus: Arc, ) -> Result<(Snapshot, ProposalTable), Error> { - let (tip_header, epoch) = Self::init_store(&store, &consensus)?; + let (tip_header, epoch) = Self::init_store(store, &consensus)?; let total_difficulty = store .get_block_ext(&tip_header.hash()) .ok_or_else(|| InternalErrorKind::Database.other("failed to get tip's block_ext"))? .total_difficulty; - let (proposal_table, proposal_view) = Self::init_proposal_table(&store, &consensus); + let (proposal_table, proposal_view) = Self::init_proposal_table(store, &consensus); let snapshot = Snapshot::new( tip_header, @@ -315,10 +315,10 @@ impl SharedBuilder { async_handle, } = self; - let tx_pool_config = tx_pool_config.unwrap_or_else(Default::default); - let notify_config = notify_config.unwrap_or_else(Default::default); - let store_config = store_config.unwrap_or_else(Default::default); - let consensus = Arc::new(consensus.unwrap_or_else(Consensus::default)); + let tx_pool_config = tx_pool_config.unwrap_or_default(); + let notify_config = notify_config.unwrap_or_default(); + let store_config = store_config.unwrap_or_default(); + let consensus = Arc::new(consensus.unwrap_or_default()); let notify_controller = start_notify_service(notify_config); diff --git a/util/logger-config/Cargo.toml b/util/logger-config/Cargo.toml index 917dd147e0..ae890bb241 100644 --- a/util/logger-config/Cargo.toml +++ b/util/logger-config/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-logger-config" version = "0.102.0-pre" license = "MIT" authors = ["Nervos "] 
-edition = "2018" +edition = "2021" description = "CKB logger configurations." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/logger-service/Cargo.toml b/util/logger-service/Cargo.toml index 6f1a4723a0..79d2f513f5 100644 --- a/util/logger-service/Cargo.toml +++ b/util/logger-service/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-logger-service" version = "0.102.0-pre" license = "MIT" authors = ["Nervos "] -edition = "2018" +edition = "2021" description = "CKB logger and logging service." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/logger-service/src/tests.rs b/util/logger-service/src/tests.rs index 49e22a70ec..e974562e17 100644 --- a/util/logger-service/src/tests.rs +++ b/util/logger-service/src/tests.rs @@ -4,19 +4,19 @@ use crate::convert_compatible_crate_name; fn test_convert_compatible_crate_name() { let spec = "info,a-b=trace,c-d_e-f=warn,g-h-i=debug,jkl=trace/*[0-9]"; let expected = "info,a-b=trace,a_b=trace,c-d_e-f=warn,c_d_e_f=warn,g-h-i=debug,g_h_i=debug,jkl=trace/*[0-9]"; - let result = convert_compatible_crate_name(&spec); + let result = convert_compatible_crate_name(spec); assert_eq!(&result, &expected); let spec = "info,a-b=trace,c-d_e-f=warn,g-h-i=debug,jkl=trace"; let expected = "info,a-b=trace,a_b=trace,c-d_e-f=warn,c_d_e_f=warn,g-h-i=debug,g_h_i=debug,jkl=trace"; - let result = convert_compatible_crate_name(&spec); + let result = convert_compatible_crate_name(spec); assert_eq!(&result, &expected); let spec = "info/*[0-9]"; let expected = "info/*[0-9]"; - let result = convert_compatible_crate_name(&spec); + let result = convert_compatible_crate_name(spec); assert_eq!(&result, &expected); let spec = "info"; let expected = "info"; - let result = convert_compatible_crate_name(&spec); + let result = convert_compatible_crate_name(spec); assert_eq!(&result, &expected); } diff --git 
a/util/logger-service/tests/has_panic_info.rs b/util/logger-service/tests/has_panic_info.rs index f60f0e223a..23f86f479a 100644 --- a/util/logger-service/tests/has_panic_info.rs +++ b/util/logger-service/tests/has_panic_info.rs @@ -33,12 +33,12 @@ fn has_panic_info() { assert!(utils::has_line_in_log_file( &log_file, Level::Error, - &panic_line_content_2 + panic_line_content_2 )); assert!(utils::has_line_in_log_file( &log_file, Level::Error, - &line_content + line_content )); } diff --git a/util/logger/Cargo.toml b/util/logger/Cargo.toml index 505804ffcc..4eb7997cee 100644 --- a/util/logger/Cargo.toml +++ b/util/logger/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-logger" version = "0.102.0-pre" license = "MIT" authors = ["Nervos "] -edition = "2018" +edition = "2021" description = "CKB logging facade." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/memory-tracker/Cargo.toml b/util/memory-tracker/Cargo.toml index b7563618b4..bc669bda7f 100644 --- a/util/memory-tracker/Cargo.toml +++ b/util/memory-tracker/Cargo.toml @@ -2,7 +2,7 @@ name = "ckb-memory-tracker" version = "0.102.0-pre" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" license = "MIT" description = "Track the memory usage of CKB." homepage = "https://github.com/nervosnetwork/ckb" diff --git a/util/metrics-config/Cargo.toml b/util/metrics-config/Cargo.toml index 714b9bd7d8..7379c627a4 100644 --- a/util/metrics-config/Cargo.toml +++ b/util/metrics-config/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-metrics-config" version = "0.102.0-pre" license = "MIT" authors = ["Nervos "] -edition = "2018" +edition = "2021" description = "CKB metrics configurations." 
homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/metrics-service/Cargo.toml b/util/metrics-service/Cargo.toml index 54346fa0bc..4889476f3c 100644 --- a/util/metrics-service/Cargo.toml +++ b/util/metrics-service/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-metrics-service" version = "0.102.0-pre" license = "MIT" authors = ["Nervos "] -edition = "2018" +edition = "2021" description = "The service which handle the metrics data in CKB." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/metrics/Cargo.toml b/util/metrics/Cargo.toml index 601865455c..9f83f7e634 100644 --- a/util/metrics/Cargo.toml +++ b/util/metrics/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-metrics" version = "0.102.0-pre" license = "MIT" authors = ["Nervos "] -edition = "2018" +edition = "2021" description = "A lightweight metrics facade used in CKB." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/multisig/Cargo.toml b/util/multisig/Cargo.toml index 40481bc258..14979785af 100644 --- a/util/multisig/Cargo.toml +++ b/util/multisig/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-multisig" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "CKB multi-signature library." 
homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/multisig/src/secp256k1.rs b/util/multisig/src/secp256k1.rs index c8e7c1f5f9..d37200939f 100644 --- a/util/multisig/src/secp256k1.rs +++ b/util/multisig/src/secp256k1.rs @@ -33,7 +33,7 @@ where &sig.serialize()[..], message.as_ref() ); - match sig.recover(&message) { + match sig.recover(message) { Ok(pubkey) => Some(pubkey), Err(err) => { debug!("recover secp256k1 sig error: {}", err); diff --git a/util/network-alert/Cargo.toml b/util/network-alert/Cargo.toml index c51261cacd..d03f285e1b 100644 --- a/util/network-alert/Cargo.toml +++ b/util/network-alert/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-network-alert" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "The alert protocol implementation" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/network-alert/src/notifier.rs b/util/network-alert/src/notifier.rs index b837e69b0c..1210b8f9a0 100644 --- a/util/network-alert/src/notifier.rs +++ b/util/network-alert/src/notifier.rs @@ -83,7 +83,7 @@ impl Notifier { self.received_alerts.insert(alert_id, alert.clone()); // check conditions, figure out do we need to notice this alert - if !self.is_version_effective(&alert) { + if !self.is_version_effective(alert) { debug!("received a version ineffective alert {:?}", alert); return; } diff --git a/util/occupied-capacity/Cargo.toml b/util/occupied-capacity/Cargo.toml index 4043caaf3b..1c2a97b256 100644 --- a/util/occupied-capacity/Cargo.toml +++ b/util/occupied-capacity/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-occupied-capacity" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "TODO(doc): @keroro520 crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = 
"https://github.com/nervosnetwork/ckb" diff --git a/util/occupied-capacity/core/Cargo.toml b/util/occupied-capacity/core/Cargo.toml index a6adfd7285..2a5276a820 100644 --- a/util/occupied-capacity/core/Cargo.toml +++ b/util/occupied-capacity/core/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-occupied-capacity-core" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "TODO(doc): @keroro520 crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/occupied-capacity/macros/Cargo.toml b/util/occupied-capacity/macros/Cargo.toml index 8963af2e0c..ba533eb759 100644 --- a/util/occupied-capacity/macros/Cargo.toml +++ b/util/occupied-capacity/macros/Cargo.toml @@ -2,7 +2,7 @@ name = "ckb-occupied-capacity-macros" version = "0.102.0-pre" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" license = "MIT" description = "TODO(doc): @keroro520 crate description" homepage = "https://github.com/nervosnetwork/ckb" diff --git a/util/proposal-table/Cargo.toml b/util/proposal-table/Cargo.toml index d683753276..12bc2229e0 100644 --- a/util/proposal-table/Cargo.toml +++ b/util/proposal-table/Cargo.toml @@ -2,7 +2,7 @@ name = "ckb-proposal-table" version = "0.102.0-pre" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" license = "MIT" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html description = "The CKB proposal table" diff --git a/util/rational/Cargo.toml b/util/rational/Cargo.toml index 75bc732028..725423900c 100644 --- a/util/rational/Cargo.toml +++ b/util/rational/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-rational" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "Rational numbers." 
homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/rational/src/lib.rs b/util/rational/src/lib.rs index 7b1ae1653f..6c90310a71 100644 --- a/util/rational/src/lib.rs +++ b/util/rational/src/lib.rs @@ -168,7 +168,7 @@ impl Mul<&U256> for &RationalU256 { type Output = RationalU256; #[inline] fn mul(self, rhs: &U256) -> RationalU256 { - let gcd = self.denom.gcd(&rhs); + let gcd = self.denom.gcd(rhs); RationalU256::new_raw(&self.numer * (rhs.div(&gcd)), (&self.denom).div(gcd)) } } @@ -225,7 +225,7 @@ impl Div for &RationalU256 { #[inline] fn div(self, rhs: RationalU256) -> RationalU256 { - (&self).div(&rhs) + self.div(&rhs) } } @@ -244,7 +244,7 @@ impl Div<&U256> for &RationalU256 { #[inline] fn div(self, rhs: &U256) -> RationalU256 { - let gcd = self.numer.gcd(&rhs); + let gcd = self.numer.gcd(rhs); RationalU256::new_raw(&self.numer / &gcd, &self.denom * (rhs / gcd)) } } @@ -386,7 +386,7 @@ impl Sub for &RationalU256 { type Output = RationalU256; #[inline] fn sub(self, rhs: RationalU256) -> RationalU256 { - (&self).sub(&rhs) + self.sub(&rhs) } } diff --git a/util/reward-calculator/Cargo.toml b/util/reward-calculator/Cargo.toml index 1c3a9f02a4..db4a23dc19 100644 --- a/util/reward-calculator/Cargo.toml +++ b/util/reward-calculator/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-reward-calculator" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "TODO(doc): @keroro520 crate description" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/reward-calculator/src/lib.rs b/util/reward-calculator/src/lib.rs index 7b693e9f6a..9c269f7ccb 100644 --- a/util/reward-calculator/src/lib.rs +++ b/util/reward-calculator/src/lib.rs @@ -255,7 +255,7 @@ impl<'a, CS: ChainStore<'a>> RewardCalculator<'a, CS> { fn base_block_reward(&self, target: &HeaderView) -> Result<(Capacity, Capacity), 
DaoError> { let data_loader = self.store.as_data_provider(); - let calculator = DaoCalculator::new(&self.consensus, &data_loader); + let calculator = DaoCalculator::new(self.consensus, &data_loader); let primary_block_reward = calculator.primary_block_reward(target)?; let secondary_block_reward = calculator.secondary_block_reward(target)?; @@ -264,7 +264,7 @@ impl<'a, CS: ChainStore<'a>> RewardCalculator<'a, CS> { fn get_proposal_ids_by_hash(&self, hash: &Byte32) -> HashSet { let mut ids_set = HashSet::new(); - if let Some(ids) = self.store.get_block_proposal_txs_ids(&hash) { + if let Some(ids) = self.store.get_block_proposal_txs_ids(hash) { ids_set.extend(ids) } if let Some(us) = self.store.get_block_uncles(hash) { diff --git a/util/reward-calculator/src/tests.rs b/util/reward-calculator/src/tests.rs index ab5a84254c..3f331e2b07 100644 --- a/util/reward-calculator/src/tests.rs +++ b/util/reward-calculator/src/tests.rs @@ -9,7 +9,6 @@ use ckb_types::{ prelude::*, }; use std::collections::HashSet; -use std::iter::FromIterator; use tempfile::TempDir; use crate::RewardCalculator; diff --git a/util/runtime/Cargo.toml b/util/runtime/Cargo.toml index ace7203dfc..484d64d9d6 100644 --- a/util/runtime/Cargo.toml +++ b/util/runtime/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-async-runtime" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "CKB async runtime wrapper." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/rust-unstable-port/Cargo.toml b/util/rust-unstable-port/Cargo.toml index 10ac7cb283..fdc1201989 100644 --- a/util/rust-unstable-port/Cargo.toml +++ b/util/rust-unstable-port/Cargo.toml @@ -2,7 +2,7 @@ name = "ckb-rust-unstable-port" version = "0.102.0-pre" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" license = "MIT" description = "Collection of features backport from unstable Rust." 
homepage = "https://github.com/nervosnetwork/ckb" diff --git a/util/snapshot/Cargo.toml b/util/snapshot/Cargo.toml index 445aa807aa..4b85360b58 100644 --- a/util/snapshot/Cargo.toml +++ b/util/snapshot/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-snapshot" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "The ckb rocksdb snapshot wrapper" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/spawn/Cargo.toml b/util/spawn/Cargo.toml index 2614a16253..15c5c335c7 100644 --- a/util/spawn/Cargo.toml +++ b/util/spawn/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-spawn" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "Trait define spawns a new asynchronous task" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/util/stop-handler/Cargo.toml b/util/stop-handler/Cargo.toml index 3ea683ab26..b0c6810eb6 100644 --- a/util/stop-handler/Cargo.toml +++ b/util/stop-handler/Cargo.toml @@ -2,7 +2,7 @@ name = "ckb-stop-handler" version = "0.102.0-pre" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" license = "MIT" description = "TODO(doc): @keroro520 crate description" homepage = "https://github.com/nervosnetwork/ckb" diff --git a/util/stop-handler/src/lib.rs b/util/stop-handler/src/lib.rs index 22895ba607..d3e099cef8 100644 --- a/util/stop-handler/src/lib.rs +++ b/util/stop-handler/src/lib.rs @@ -74,8 +74,8 @@ impl Clone for Ref { #[inline] fn clone(&self) -> Ref { match self { - Self::Arc(arc) => Self::Arc(Arc::clone(&arc)), - Self::Weak(weak) => Self::Weak(Weak::clone(&weak)), + Self::Arc(arc) => Self::Arc(Arc::clone(arc)), + Self::Weak(weak) => Self::Weak(Weak::clone(weak)), } } } @@ -83,8 +83,8 @@ impl Clone for Ref { impl Ref { fn downgrade(&self) -> Ref { match self { - Self::Arc(arc) => 
Self::Weak(Arc::downgrade(&arc)), - Self::Weak(weak) => Self::Weak(Weak::clone(&weak)), + Self::Arc(arc) => Self::Weak(Arc::downgrade(arc)), + Self::Weak(weak) => Self::Weak(Weak::clone(weak)), } } } diff --git a/util/test-chain-utils/Cargo.toml b/util/test-chain-utils/Cargo.toml index d816af3125..2a0ef92ce9 100644 --- a/util/test-chain-utils/Cargo.toml +++ b/util/test-chain-utils/Cargo.toml @@ -2,7 +2,7 @@ name = "ckb-test-chain-utils" version = "0.102.0-pre" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" license = "MIT" description = "Provide several functions used for testing." homepage = "https://github.com/nervosnetwork/ckb" diff --git a/util/test-chain-utils/src/mock_store.rs b/util/test-chain-utils/src/mock_store.rs index 6a4cceba2d..1cb1923e66 100644 --- a/util/test-chain-utils/src/mock_store.rs +++ b/util/test-chain-utils/src/mock_store.rs @@ -76,8 +76,8 @@ impl MockStore { pub fn insert_block(&self, block: &BlockView, epoch_ext: &EpochExt) { let db_txn = self.store().begin_transaction(); let last_block_hash_in_previous_epoch = epoch_ext.last_block_hash_in_previous_epoch(); - db_txn.insert_block(&block).unwrap(); - db_txn.attach_block(&block).unwrap(); + db_txn.insert_block(block).unwrap(); + db_txn.attach_block(block).unwrap(); db_txn .insert_block_epoch_index(&block.hash(), &last_block_hash_in_previous_epoch) .unwrap(); @@ -104,8 +104,8 @@ impl MockStore { #[doc(hidden)] pub fn remove_block(&self, block: &BlockView) { let db_txn = self.store().begin_transaction(); - db_txn.delete_block(&block).unwrap(); - db_txn.detach_block(&block).unwrap(); + db_txn.delete_block(block).unwrap(); + db_txn.detach_block(block).unwrap(); db_txn.commit().unwrap(); } } diff --git a/util/types/Cargo.toml b/util/types/Cargo.toml index 10c86805f7..d049881068 100644 --- a/util/types/Cargo.toml +++ b/util/types/Cargo.toml @@ -2,7 +2,7 @@ name = "ckb-types" version = "0.102.0-pre" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" license = 
"MIT" description = "Provides the essential types for CKB." homepage = "https://github.com/nervosnetwork/ckb" diff --git a/util/types/src/core/blockchain.rs b/util/types/src/core/blockchain.rs index c4538d6a80..6e771fc98f 100644 --- a/util/types/src/core/blockchain.rs +++ b/util/types/src/core/blockchain.rs @@ -1,5 +1,4 @@ use ckb_error::OtherError; -use std::convert::{TryFrom, TryInto}; use crate::packed; diff --git a/util/types/src/core/cell.rs b/util/types/src/core/cell.rs index 4b61da96c8..ae8826a1c7 100644 --- a/util/types/src/core/cell.rs +++ b/util/types/src/core/cell.rs @@ -14,7 +14,6 @@ use ckb_error::Error; use ckb_occupied_capacity::Result as CapacityResult; use once_cell::sync::OnceCell; use std::collections::{hash_map::Entry, HashMap, HashSet}; -use std::convert::TryInto; use std::fmt; use std::hash::{BuildHasher, Hash, Hasher}; diff --git a/util/types/src/core/tests/blockchain.rs b/util/types/src/core/tests/blockchain.rs index 48bb1ce243..c849f0becd 100644 --- a/util/types/src/core/tests/blockchain.rs +++ b/util/types/src/core/tests/blockchain.rs @@ -1,5 +1,3 @@ -use std::convert::TryFrom; - use crate::{ core::{DepType, ScriptHashType}, packed, diff --git a/util/types/src/core/tests/tx_pool.rs b/util/types/src/core/tests/tx_pool.rs index a7b03bf6dc..c58f554c70 100644 --- a/util/types/src/core/tests/tx_pool.rs +++ b/util/types/src/core/tests/tx_pool.rs @@ -8,19 +8,19 @@ use crate::core::{ #[test] fn test_if_is_malformed_tx() { let reject = Reject::LowFeeRate(Default::default(), 0, 0); - assert_eq!(reject.is_malformed_tx(), false); + assert!(!reject.is_malformed_tx()); let reject = Reject::ExceededMaximumAncestorsCount; - assert_eq!(reject.is_malformed_tx(), false); + assert!(!reject.is_malformed_tx()); let reject = Reject::Full(Default::default(), 0); - assert_eq!(reject.is_malformed_tx(), false); + assert!(!reject.is_malformed_tx()); let reject = Reject::Duplicated(Default::default()); - assert_eq!(reject.is_malformed_tx(), false); + 
assert!(!reject.is_malformed_tx()); let reject = Reject::Malformed(Default::default()); - assert_eq!(reject.is_malformed_tx(), true); + assert!(reject.is_malformed_tx()); for error in vec![ OutPointError::Dead(Default::default()), @@ -31,7 +31,7 @@ fn test_if_is_malformed_tx() { OutPointError::ImmatureHeader(Default::default()), ] { let reject = Reject::Resolve(error); - assert_eq!(reject.is_malformed_tx(), false); + assert!(!reject.is_malformed_tx()); } for ban in vec![true, false].into_iter() { @@ -96,7 +96,7 @@ fn test_if_is_malformed_tx() { let error_kind = ErrorKind::Script; let error = error_kind.because(DefaultError); let reject = Reject::Verification(error); - assert_eq!(reject.is_malformed_tx(), true); + assert!(reject.is_malformed_tx()); } for error_kind in &[ diff --git a/util/types/src/core/views.rs b/util/types/src/core/views.rs index e225a82ffb..58add5da74 100644 --- a/util/types/src/core/views.rs +++ b/util/types/src/core/views.rs @@ -430,8 +430,8 @@ impl ExtraHashView { let extension_hash_and_extra_hash = extension_hash_opt.map(|extension_hash| { let mut ret = [0u8; 32]; let mut blake2b = new_blake2b(); - blake2b.update(&uncles_hash.as_slice()); - blake2b.update(&extension_hash.as_slice()); + blake2b.update(uncles_hash.as_slice()); + blake2b.update(extension_hash.as_slice()); blake2b.finalize(&mut ret); (extension_hash, ret.pack()) }); diff --git a/util/types/src/extension/shortcuts.rs b/util/types/src/extension/shortcuts.rs index 8709099d9a..997e542705 100644 --- a/util/types/src/extension/shortcuts.rs +++ b/util/types/src/extension/shortcuts.rs @@ -281,7 +281,7 @@ impl<'r> packed::BlockReader<'r> { /// Panics if the first extra field exists but not a valid [`BytesReader`](struct.BytesReader.html). 
pub fn extension(&self) -> Option { self.extra_field(0) - .map(|data| packed::BytesReader::from_slice(&data).unwrap()) + .map(|data| packed::BytesReader::from_slice(data).unwrap()) } } @@ -382,7 +382,7 @@ impl packed::CompactBlock { let prefilled_indexes: HashSet = self.prefilled_indexes_iter().collect(); (0..self.txs_len()) - .filter(|index| !prefilled_indexes.contains(&index)) + .filter(|index| !prefilled_indexes.contains(index)) .collect() } diff --git a/util/types/src/extension/tests/check_data.rs b/util/types/src/extension/tests/check_data.rs index 8524a0bd88..54bc2840ed 100644 --- a/util/types/src/extension/tests/check_data.rs +++ b/util/types/src/extension/tests/check_data.rs @@ -5,22 +5,13 @@ fn create_transaction( outputs_data: &[&[u8]], cell_deps: &[&packed::CellDep], ) -> packed::Transaction { - let outputs = outputs - .iter() - .map(|d| d.to_owned().to_owned()) - .collect::>(); - let outputs_data = outputs_data - .iter() - .map(|d| d.to_owned().to_owned().pack()) - .collect::>(); - let cell_deps = cell_deps - .iter() - .map(|d| d.to_owned().to_owned()) - .collect::>(); + let outputs_iter = outputs.iter().map(|d| d.to_owned().to_owned()); + let outputs_data_iter = outputs_data.iter().map(|d| d.to_owned().to_owned().pack()); + let cell_deps_iter = cell_deps.iter().map(|d| d.to_owned().to_owned()); let raw = packed::RawTransaction::new_builder() - .outputs(outputs.into_iter().pack()) - .outputs_data(outputs_data.into_iter().pack()) - .cell_deps(cell_deps.into_iter().pack()) + .outputs(outputs_iter.pack()) + .outputs_data(outputs_data_iter.pack()) + .cell_deps(cell_deps_iter.pack()) .build(); packed::Transaction::new_builder().raw(raw).build() } diff --git a/util/types/src/utilities/tests/difficulty.rs b/util/types/src/utilities/tests/difficulty.rs index 66ce88226f..e1b26251cf 100644 --- a/util/types/src/utilities/tests/difficulty.rs +++ b/util/types/src/utilities/tests/difficulty.rs @@ -37,7 +37,7 @@ fn test_extremes() { let compact_cause_overflow = 
0xff123456; let (_, overflow) = compact_to_target(compact_cause_overflow); - assert_eq!(overflow, true); + assert!(overflow); let difficulty = compact_to_difficulty(compact_cause_overflow); assert_eq!(difficulty, U256::zero()); @@ -47,7 +47,7 @@ fn test_extremes() { fn _test_compact_overflowing(target: U256) { let compact = target_to_compact(target); let (_, overflow) = compact_to_target(compact); - assert_eq!(overflow, false, "should not overflow"); + assert!(!overflow, "should not overflow"); } #[test] @@ -55,88 +55,88 @@ fn test_compact_convert() { let (ret, overflow) = compact_to_target(0); let compact = target_to_compact(u256!("0x0")); assert_eq!(ret, u256!("0x0")); - assert_eq!(overflow, false); + assert!(!overflow); assert_eq!(compact, 0); let (ret, overflow) = compact_to_target(0x123456); assert_eq!(ret, u256!("0x0")); - assert_eq!(overflow, false); + assert!(!overflow); let (ret, overflow) = compact_to_target(0x1003456); assert_eq!(ret, u256!("0x0")); - assert_eq!(overflow, false); + assert!(!overflow); let (ret, overflow) = compact_to_target(0x2000056); assert_eq!(ret, u256!("0x0")); - assert_eq!(overflow, false); + assert!(!overflow); let (ret, overflow) = compact_to_target(0x3000000); assert_eq!(ret, u256!("0x0")); - assert_eq!(overflow, false); + assert!(!overflow); let (ret, overflow) = compact_to_target(0x4000000); assert_eq!(ret, u256!("0x0")); - assert_eq!(overflow, false); + assert!(!overflow); let (ret, overflow) = compact_to_target(0x923456); assert_eq!(ret, u256!("0x0")); - assert_eq!(overflow, false); + assert!(!overflow); let (ret, overflow) = compact_to_target(0x1803456); assert_eq!(ret, u256!("0x80")); - assert_eq!(overflow, false); + assert!(!overflow); let (ret, overflow) = compact_to_target(0x2800056); assert_eq!(ret, u256!("0x8000")); - assert_eq!(overflow, false); + assert!(!overflow); let (ret, overflow) = compact_to_target(0x3800000); assert_eq!(ret, u256!("0x800000")); - assert_eq!(overflow, false); + assert!(!overflow); let (ret, 
overflow) = compact_to_target(0x4800000); assert_eq!(ret, u256!("0x80000000")); - assert_eq!(overflow, false); + assert!(!overflow); let (ret, overflow) = compact_to_target(0x1020000); let compact = target_to_compact(u256!("0x2")); assert_eq!(ret, u256!("0x2")); - assert_eq!(overflow, false); + assert!(!overflow); assert_eq!(compact, 0x1020000); let (ret, overflow) = compact_to_target(0x1fedcba); let compact = target_to_compact(u256!("0xfe")); assert_eq!(ret, u256!("0xfe")); - assert_eq!(overflow, false); + assert!(!overflow); assert_eq!(compact, 0x1fe0000); let (ret, overflow) = compact_to_target(0x2123456); let compact = target_to_compact(u256!("0x1234")); assert_eq!(ret, u256!("0x1234")); - assert_eq!(overflow, false); + assert!(!overflow); assert_eq!(compact, 0x2123400); let (ret, overflow) = compact_to_target(0x3123456); assert_eq!(ret, u256!("0x123456")); let compact = target_to_compact(u256!("0x123456")); - assert_eq!(overflow, false); + assert!(!overflow); assert_eq!(compact, 0x3123456); let (ret, overflow) = compact_to_target(0x4123456); assert_eq!(ret, u256!("0x12345600")); - assert_eq!(overflow, false); + assert!(!overflow); let compact = target_to_compact(u256!("0x12345600")); assert_eq!(compact, 0x4123456); let (ret, overflow) = compact_to_target(0x4923456); assert_eq!(ret, u256!("0x92345600")); - assert_eq!(overflow, false); + assert!(!overflow); let compact = target_to_compact(u256!("0x92345600")); assert_eq!(compact, 0x4923456); let (ret, overflow) = compact_to_target(0x4923400); assert_eq!(ret, u256!("0x92340000")); - assert_eq!(overflow, false); + assert!(!overflow); let compact = target_to_compact(u256!("0x92340000")); assert_eq!(compact, 0x4923400); @@ -145,14 +145,14 @@ fn test_compact_convert() { ret, u256!("0x1234560000000000000000000000000000000000000000000000000000000000") ); - assert_eq!(overflow, false); + assert!(!overflow); let compact = target_to_compact(u256!( "0x1234560000000000000000000000000000000000000000000000000000000000" )); 
assert_eq!(compact, 0x20123456); let (_, overflow) = compact_to_target(0xff123456); - assert_eq!(overflow, true); + assert!(overflow); } #[test] @@ -160,11 +160,11 @@ fn test_compact_overflowing2() { _test_compact_overflowing(U256::max_value()); let (_, overflow) = compact_to_target(0x21000001); - assert_eq!(overflow, true, "should overflow"); + assert!(overflow, "should overflow"); let (_, overflow) = compact_to_target(0x22000001); - assert_eq!(overflow, true, "should overflow"); + assert!(overflow, "should overflow"); let (_, overflow) = compact_to_target(0x23000001); - assert_eq!(overflow, true, "should overflow"); + assert!(overflow, "should overflow"); } proptest! { diff --git a/verification/Cargo.toml b/verification/Cargo.toml index b9357e15cf..e79545d62d 100644 --- a/verification/Cargo.toml +++ b/verification/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-verification" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "The CKB verification" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/verification/contextual/Cargo.toml b/verification/contextual/Cargo.toml index a86712686f..fb25a9d44f 100644 --- a/verification/contextual/Cargo.toml +++ b/verification/contextual/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-verification-contextual" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "The CKB verification contextual" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" diff --git a/verification/contextual/src/contextual_block_verifier.rs b/verification/contextual/src/contextual_block_verifier.rs index c7a20db016..29490a90cb 100644 --- a/verification/contextual/src/contextual_block_verifier.rs +++ b/verification/contextual/src/contextual_block_verifier.rs @@ -82,7 +82,7 @@ impl<'a, CS: ChainStore<'a>> HeaderChecker for 
VerifyContext<'a, CS> { impl<'a, CS: ChainStore<'a>> ConsensusProvider for VerifyContext<'a, CS> { fn get_consensus(&self) -> &Consensus { - &self.consensus + self.consensus } } @@ -122,7 +122,7 @@ impl<'a, 'b, CS: ChainStore<'a>> UncleProvider for UncleVerifierContext<'a, 'b, } fn epoch(&self) -> &EpochExt { - &self.epoch + self.epoch } fn consensus(&self) -> &Consensus { @@ -299,7 +299,7 @@ impl<'a, 'b, 'c, CS: ChainStore<'a>> DaoHeaderVerifier<'a, 'b, 'c, CS> { self.context.consensus, &self.context.store.as_data_provider(), ) - .dao_field(&self.resolved, self.parent) + .dao_field(self.resolved, self.parent) .map_err(|e| { error_target!( crate::LOG_TARGET, @@ -392,7 +392,7 @@ impl<'a, CS: ChainStore<'a>> BlockTxsVerifier<'a, CS> { if let Some(cache_entry) = fetched_cache.get(&tx_hash) { match cache_entry { CacheEntry::Completed(completed) => TimeRelativeTransactionVerifier::new( - &tx, + tx, self.context.consensus, self.context, &tx_env, @@ -407,7 +407,7 @@ impl<'a, CS: ChainStore<'a>> BlockTxsVerifier<'a, CS> { }) .map(|_| (tx_hash, *completed)), CacheEntry::Suspended(suspended) => ContextualTransactionVerifier::new( - &tx, + tx, self.context.consensus, &self.context.store.as_data_provider(), &tx_env, @@ -428,7 +428,7 @@ impl<'a, CS: ChainStore<'a>> BlockTxsVerifier<'a, CS> { } } else { ContextualTransactionVerifier::new( - &tx, + tx, self.context.consensus, &self.context.store.as_data_provider(), &tx_env, @@ -566,23 +566,23 @@ impl<'a, CS: ChainStore<'a>> ContextualBlockVerifier<'a, CS> { } if !switch.disable_uncles() { - let uncle_verifier_context = UncleVerifierContext::new(&self.context, &epoch_ext); + let uncle_verifier_context = UncleVerifierContext::new(self.context, &epoch_ext); UnclesVerifier::new(uncle_verifier_context, block).verify()?; } if !switch.disable_two_phase_commit() { - TwoPhaseCommitVerifier::new(&self.context, block).verify()?; + TwoPhaseCommitVerifier::new(self.context, block).verify()?; } if !switch.disable_daoheader() { - 
DaoHeaderVerifier::new(&self.context, resolved, &parent, &block.header()).verify()?; + DaoHeaderVerifier::new(self.context, resolved, &parent, &block.header()).verify()?; } if !switch.disable_reward() { - RewardVerifier::new(&self.context, resolved, &parent).verify()?; + RewardVerifier::new(self.context, resolved, &parent).verify()?; } - let ret = BlockTxsVerifier::new(&self.context, header, resolved).verify( + let ret = BlockTxsVerifier::new(self.context, header, resolved).verify( txs_verify_cache, handle, switch.disable_script(), diff --git a/verification/src/tests/transaction_verifier.rs b/verification/src/tests/transaction_verifier.rs index 8bf2f17728..b1ab689522 100644 --- a/verification/src/tests/transaction_verifier.rs +++ b/verification/src/tests/transaction_verifier.rs @@ -420,7 +420,7 @@ fn test_since() { for v in valids.into_iter() { let since = Since(v); - assert_eq!(since.flags_is_valid(), true); + assert!(since.flags_is_valid()); } let invalids = vec![ @@ -431,7 +431,7 @@ fn test_since() { for v in invalids.into_iter() { let since = Since(v); - assert_eq!(since.flags_is_valid(), false); + assert!(!since.flags_is_valid()); } } diff --git a/verification/src/transaction_verifier.rs b/verification/src/transaction_verifier.rs index ac1b9ca8f6..19d5ba4085 100644 --- a/verification/src/transaction_verifier.rs +++ b/verification/src/transaction_verifier.rs @@ -17,7 +17,6 @@ use ckb_types::{ prelude::*, }; use std::collections::HashSet; -use std::convert::TryInto; /// The time-related TX verification /// @@ -38,7 +37,7 @@ impl<'a, DL: HeaderProvider> TimeRelativeTransactionVerifier<'a, DL> { tx_env: &'a TxVerifyEnv, ) -> Self { TimeRelativeTransactionVerifier { - maturity: MaturityVerifier::new(&rtx, tx_env.epoch(), consensus.cellbase_maturity()), + maturity: MaturityVerifier::new(rtx, tx_env.epoch(), consensus.cellbase_maturity()), since: SinceVerifier::new(rtx, consensus, data_loader, tx_env), } } @@ -121,7 +120,7 @@ where ContextualTransactionVerifier { 
compatible: CompatibleVerifier::new(rtx, consensus, tx_env), time_relative: TimeRelativeTransactionVerifier::new( - &rtx, + rtx, consensus, data_loader, tx_env, @@ -241,7 +240,7 @@ impl<'a, DL: CellDataProvider + HeaderProvider + EpochProvider> FeeCalculator<'a if self.transaction.is_cellbase() { Ok(Capacity::zero()) } else { - DaoCalculator::new(&self.consensus, self.data_loader).transaction_fee(&self.transaction) + DaoCalculator::new(self.consensus, self.data_loader).transaction_fee(self.transaction) } } } @@ -926,7 +925,7 @@ where ContextualWithoutScriptTransactionVerifier { compatible: CompatibleVerifier::new(rtx, consensus, tx_env), time_relative: TimeRelativeTransactionVerifier::new( - &rtx, + rtx, consensus, data_loader, tx_env, diff --git a/verification/traits/Cargo.toml b/verification/traits/Cargo.toml index b0dc0ab14d..921e33835c 100644 --- a/verification/traits/Cargo.toml +++ b/verification/traits/Cargo.toml @@ -3,11 +3,11 @@ name = "ckb-verification-traits" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "The CKB verification traits" homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb" [dependencies] bitflags = "1.0" -ckb-error = { path = "../../error", version = "= 0.102.0-pre" } \ No newline at end of file +ckb-error = { path = "../../error", version = "= 0.102.0-pre" } diff --git a/wasm-build-test/Cargo.toml b/wasm-build-test/Cargo.toml index f758df62cb..ef92f52122 100644 --- a/wasm-build-test/Cargo.toml +++ b/wasm-build-test/Cargo.toml @@ -3,7 +3,7 @@ name = "ckb-wasm-test" version = "0.102.0-pre" license = "MIT" authors = ["Nervos Core Dev "] -edition = "2018" +edition = "2021" description = "Testsuite to ensure some crates work with diffent features." homepage = "https://github.com/nervosnetwork/ckb" repository = "https://github.com/nervosnetwork/ckb"