QA for v1.11.0 #1892
Exalate commented: Issue Created by: JayChoi1736 |
VRank metric testing
Testing process
Result
Details
Note - The unit for the metrics is nanoseconds, so a value of 1e6 corresponds to one millisecond. |
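For reference, a minimal sketch of the unit conversion the note describes; the sample value and variable name are illustrative, not actual metrics from the code.
package main

import "fmt"

func main() {
	// Metric values are reported in nanoseconds; dividing by 1e6 gives milliseconds.
	sampleNs := float64(2_500_000) // hypothetical VRank metric sample, in ns
	fmt.Printf("%.1f ms\n", sampleNs/1e6) // prints "2.5 ms"
}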
@hyeonLewis Can you run the tests with seven nodes?
I am wondering when the logs can show less than 3 milliseconds. |
Sure, I'll test it with seven nodes and update the results.
It means that there are differences between the metrics and the logs since they have different update timings, and the difference can be at most 3 ms (mostly less than 0.1 ms). |
@hyeonLewis Thank you for the quick reply. I am curious how many times |
Live state pruning test
call trace APIs during sync - OK
Details
pruning enabled + state migration is forbidden - OK
Details
pruning enabled + snapsync can work together - OK
Details
version upgrade when not pruning - OK
Details
Consistency check - Minor flaw
Details
// storage/statedb/pruning_test.go
package statedb

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"sync"
	"testing"

	"github.com/klaytn/klaytn/blockchain/types/account"
	"github.com/klaytn/klaytn/common"
	"github.com/klaytn/klaytn/log"
	"github.com/klaytn/klaytn/rlp"
	"github.com/klaytn/klaytn/storage/database"
	"github.com/stretchr/testify/assert"
)

// dbDir points to the chaindata directory of the node under test.
var dbDir = ""

func init() {
	if dbDir == "" {
		home := os.Getenv("HOME")
		dbDir = filepath.Join(home, "Library/KEN/klay/chaindata")
	}
}
// context bundles the database handles and the sets of trie node hashes
// collected during the consistency check.
type context struct {
	t   *testing.T
	dbm database.DBManager
	tdb *Database

	mu         sync.Mutex
	referenced map[common.ExtHash]bool // nodes reachable from the retained state roots
	stored     map[common.ExtHash]bool // nodes actually present in the state trie DB
}

func newContext(t *testing.T) *context {
	dbConfig := &database.DBConfig{
		Dir:                dbDir,
		DBType:             database.LevelDB,
		SingleDB:           false,
		NumStateTrieShards: 4,
		ParallelDBWrite:    true,
		OpenFilesLimit:     0,
		LevelDBCacheSize:   768,
		LevelDBCompression: 0,
		LevelDBBufferPool:  true,
	}
	fmt.Printf(">>> Opening %s\n", dbDir)
	dbm := database.NewDBManager(dbConfig)
	return &context{
		t:          t,
		dbm:        dbm,
		tdb:        NewDatabase(dbm),
		referenced: make(map[common.ExtHash]bool),
		stored:     make(map[common.ExtHash]bool),
	}
}
// headBlockNumber returns the block number of the current head block.
func headBlockNumber(ctx *context) uint64 {
	hash := ctx.dbm.ReadHeadBlockHash()
	block := ctx.dbm.ReadBlockByHash(hash)
	return block.NumberU64()
}

// iterateTrie walks a trie and marks every visited node hash in check.
// When state is true, leaves are decoded as accounts and the storage tries
// of contract accounts are traversed recursively.
func iterateTrie(ctx *context, it NodeIterator, check map[common.ExtHash]bool, state bool) {
	for it.Next(true) {
		if state && it.Leaf() {
			dec := account.NewAccountSerializer()
			err := rlp.DecodeBytes(it.LeafBlob(), &dec)
			assert.Nil(ctx.t, err, fmt.Sprintf("decode account %s: %x", it.Parent().Hex(), it.LeafBlob()))
			if err != nil {
				continue
			}
			acc := dec.GetAccount()
			addr := ctx.tdb.preimage(common.BytesToHash(it.LeafKey()))
			if pa, ok := acc.(account.ProgramAccount); ok {
				root := pa.GetStorageRoot()
				trie, err := NewSecureStorageTrie(root, ctx.tdb, nil)
				assert.Nil(ctx.t, err, fmt.Sprintf("storage_%x_%x", addr, root))
				if err != nil {
					continue
				}
				storageIt := trie.NodeIterator(nil)
				iterateTrie(ctx, storageIt, check, false)
				// Log errors from the storage trie iterator itself.
				if itErr := storageIt.Error(); itErr != nil {
					ctx.t.Logf("it.err %v", itErr)
				}
			}
		}
		if common.EmptyHash(it.Hash()) {
			continue
		}
		stack := it.(*nodeIterator).stack
		if len(stack) == 0 {
			continue
		}
		curr := stack[len(stack)-1]
		hn, _ := curr.node.cache()
		eh := common.BytesToExtHash(hn)
		check[eh] = true
	}
}
// collectState traverses the state trie of block num and records every
// reachable trie node hash into ctx.referenced.
func collectState(ctx *context, num uint64) {
	block := ctx.dbm.ReadBlockByNumber(num)
	assert.NotNil(ctx.t, block, "state_%d", num)
	if block == nil {
		return
	}
	root := block.Root()
	trie, err := NewSecureTrie(root, ctx.tdb, nil)
	assert.Nil(ctx.t, err, fmt.Sprintf("state_%d_%s", num, root.Hex()))
	if err != nil {
		return
	}
	it := trie.NodeIterator(nil)
	check := make(map[common.ExtHash]bool)
	iterateTrie(ctx, it, check, true)
	if itErr := it.Error(); itErr != nil {
		ctx.t.Logf("it.err %v", itErr)
	}

	ctx.mu.Lock()
	for eh := range check {
		ctx.referenced[eh] = true
	}
	ctx.mu.Unlock()
	ctx.t.Logf("state_%d has %d nodes", num, len(check))
}
// collectReferenced gathers the trie nodes referenced by the states that live
// pruning must retain: the genesis state, the head state, and the checkpoint
// states (every 128th block) within the 172800-block retention window.
func collectReferenced(ctx *context) {
	var (
		retention = uint64(172800)
		interval  = uint64(128)
		roundDown = func(n uint64) uint64 {
			return n - (n % interval)
		}
		head      = headBlockNumber(ctx)
		maxRemain = roundDown(head)
		minRemain = roundDown(head - retention)
	)
	if head < retention {
		minRemain = interval
	}

	chNum := make(chan uint64, 10000)
	chNum <- 0
	chNum <- head
	for num := maxRemain; num >= minRemain; num -= interval {
		chNum <- num
	}
	ctx.t.Logf("head %d retention %d\n", head, retention)
	ctx.t.Logf("%d blocks to traverse (%d - %d)", len(chNum), minRemain, head)
	close(chNum)

	// Traverse the selected states in parallel, one worker per CPU.
	var wg sync.WaitGroup
	numWorkers := runtime.NumCPU()
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for num := range chNum {
				collectState(ctx, num)
			}
		}()
	}
	wg.Wait()
}
// collectStored scans the state trie database and records every stored trie
// node hash into ctx.stored, skipping preimage ("secure-key-") and code entries.
func collectStored(ctx *context) {
	it := ctx.dbm.GetStateTrieDB().NewIterator(nil, nil)
	for it.Next() {
		k := it.Key()
		if len(k) == common.HashLength || len(k) == common.ExtHashLength {
			eh := common.BytesToExtHash(k)
			ctx.stored[eh] = true
		} else if bytes.HasPrefix(k, []byte("secure-key-")) {
			continue // account preimage entry
		} else if len(k) == 33 && k[0] == 'c' {
			continue // contract code entry
		} else {
			fmt.Printf("Unknown %x\n", k)
		}
	}
}
// TestPruningConsistency cross-checks the referenced and stored node sets:
// a node referenced by a retained state but missing from the DB was deleted
// too eagerly ("deleted"), while a stored node not referenced by any retained
// state was not pruned ("left").
func TestPruningConsistency(t *testing.T) {
	log.EnableLogForTest(log.LvlCrit, log.LvlError)
	ctx := newContext(t)

	collectStored(ctx)
	collectReferenced(ctx)

	R := ctx.referenced
	S := ctx.stored
	deleted := 0
	left := 0
	for h := range R {
		if _, ok := S[h]; !ok {
			fmt.Println("deleted", h.Hex())
			deleted++
		}
	}
	for h := range S {
		if _, ok := R[h]; !ok {
			fmt.Println("left", h.Hex())
			left++
		}
	}
	fmt.Printf(">>> stored nodes count %d\n", len(ctx.stored))
	fmt.Printf(">>> referenced nodes count %d\n", len(ctx.referenced))
	fmt.Printf(">>> deleted %d left %d\n", deleted, left)
}
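Usage note (an assumption, not part of the original report): the test is meant to run against the chaindata of a stopped node; with dbDir adjusted to that directory, an invocation along the lines of go test -v -run TestPruningConsistency ./storage/statedb/ should exercise it.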
|
Regular QA Test
|
RocksDB Test
High memory consumption
StateDB Migration
log for failed migration
Performance test (1-writer / n-readers)
system log
|
Shanghai Hardfork test
|
Yaml configuration
CheckExclusive pointer bugs (remove '&' operator) |
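A minimal sketch of the bug class this item refers to, under the assumption that CheckExclusive-style helpers type-switch on the flag values they receive: passing a pointer (with '&') instead of the flag value makes the type switch miss, so the exclusivity check is silently skipped. The BoolFlag type and checkExclusive helper below are illustrative, not the actual cmd/utils API.
package main

import "fmt"

// BoolFlag stands in for a CLI flag type; it is illustrative only.
type BoolFlag struct{ Name string }

// checkExclusive sketches a CheckExclusive-style helper: it type-switches on
// each argument to read the flag name. A *BoolFlag (passed with '&') falls
// through to the default branch and is effectively ignored.
func checkExclusive(args ...interface{}) {
	for _, arg := range args {
		switch f := arg.(type) {
		case BoolFlag:
			fmt.Println("checking flag", f.Name)
		default:
			fmt.Printf("ignored argument of type %T\n", arg) // the bug: &flag lands here
		}
	}
}

func main() {
	single := BoolFlag{Name: "single-db"}
	multi := BoolFlag{Name: "multi-db"}

	checkExclusive(single, multi)   // both flags are checked
	checkExclusive(&single, &multi) // pointers are ignored, so the check silently passes
}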
Test Pre-built binaries in different environments
|
Closing this since all mandatory testing is done successfully |
QA tasks (~2023/08/14)
Changes
Hardfork features
Improvements
--mnemonic, --mnemonic-path flag to homi for mnemonic-based key generation (#1813)
Fixes
Miscellaneous
governance RPC namespace #1780
Bump golang.org/x/crypto from 0.0.0-20220214200702-86341886e292 to 0.1.0 #1805
Bump golang.org/x/net from 0.0.0-20220225172249-27dd8689420f to 0.7.0 #1807
Bump github.com/prometheus/client_golang from 1.1.0 to 1.11.1 #1808
update rjeczalik/notify version to 0.9.3 #1816
Bump github.com/docker/docker from 1.13.1 to 20.10.24+incompatible #1818
Stop loading rewardwallet #1819
update codeowner #1821
cmd: use getParams() to fix the test failure at cmd/utils/nodecmd #1826
Add BlockchainContractCaller #1828
[Misc] Copyright modified #1838
Refactor Trie hasher using onRoot flag #1840
Add trie node unit tests #1843
Refactor DiskDB trie accessors #1846
[Validator] Tidy-up of consolidated staking amount calculator #1856
Fix comment for AddBalance #1865
Benchmark Ecrecover and BLS verify in parallel #1867
[ABI] Out-of-bound access fixed #1872
Bump google.golang.org/grpc from 1.32.0 to 1.53.0 #1875
[EVM] Avoid unncessary code execution during a contract deployment #1876
build: update dd-trace-go.v1 to v1.42.0 #1880
Fix getStakingInfoFromAddressBook error handling #1882
params: add ethtxtype fork indicator at chainconfig.Rules #1887
params: rename mantle to shanghaiCompatible since mantle will be named for v1.12 #1890
Add CurrentBlock in backends.BlockChainForCaller #1893
Change version to v1.11.0 #1895
Update the comment of CommitExt #1896
Change Log Level for "Mining too far in the future" to DEBUG #1897
Fix nodecmd tests #1909