Merge branch 'ethereum:master' into master
gitteri committed Jun 6, 2022
2 parents 89fb050 + 997f1c4 commit bfaa203
Showing 28 changed files with 572 additions and 155 deletions.
15 changes: 12 additions & 3 deletions cmd/abigen/main.go
@@ -55,7 +55,7 @@ var (
}
jsonFlag = cli.StringFlag{
Name: "combined-json",
Usage: "Path to the combined-json file generated by compiler",
Usage: "Path to the combined-json file generated by compiler, - for STDIN",
}
excFlag = cli.StringFlag{
Name: "exc",
@@ -165,9 +165,18 @@ func abigen(c *cli.Context) error {
var contracts map[string]*compiler.Contract

if c.GlobalIsSet(jsonFlag.Name) {
- jsonOutput, err := os.ReadFile(c.GlobalString(jsonFlag.Name))
+ var (
+ input = c.GlobalString(jsonFlag.Name)
+ jsonOutput []byte
+ err error
+ )
+ if input == "-" {
+ jsonOutput, err = io.ReadAll(os.Stdin)
+ } else {
+ jsonOutput, err = os.ReadFile(input)
+ }
if err != nil {
utils.Fatalf("Failed to read combined-json from compiler: %v", err)
utils.Fatalf("Failed to read combined-json: %v", err)
}
contracts, err = compiler.ParseCombinedJSON(jsonOutput, "", "", "", "")
if err != nil {
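The "-" value added here follows common Unix practice: a lone dash means "read from standard input". A minimal self-contained sketch of the same dispatch, outside geth (the sample pipeline in the comment is illustrative, not taken from this commit):

package main

import (
	"fmt"
	"io"
	"os"
)

// readCombinedJSON mirrors the abigen change above: "-" selects stdin,
// anything else is treated as a file path.
func readCombinedJSON(path string) ([]byte, error) {
	if path == "-" {
		// e.g. solc ... --combined-json abi,bin | abigen --combined-json -
		return io.ReadAll(os.Stdin)
	}
	return os.ReadFile(path)
}

func main() {
	data, err := readCombinedJSON("-")
	if err != nil {
		fmt.Fprintln(os.Stderr, "failed to read combined-json:", err)
		os.Exit(1)
	}
	fmt.Printf("read %d bytes\n", len(data))
}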
5 changes: 3 additions & 2 deletions cmd/geth/config.go
@@ -164,7 +164,7 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
}
backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
// Warn users to migrate if they have a legacy freezer format.
- if eth != nil {
+ if eth != nil && !ctx.GlobalIsSet(utils.IgnoreLegacyReceiptsFlag.Name) {
firstIdx := uint64(0)
// Hack to speed up check for mainnet because we know
// the first non-empty block.
@@ -176,7 +176,8 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
if err != nil {
log.Error("Failed to check db for legacy receipts", "err", err)
} else if isLegacy {
log.Warn("Database has receipts with a legacy format. Please run `geth db freezer-migrate`.")
stack.Close()
utils.Fatalf("Database has receipts with a legacy format. Please run `geth db freezer-migrate`.")
}
}

4 changes: 2 additions & 2 deletions cmd/geth/dbcmd.go
@@ -307,7 +307,7 @@ func checkStateContent(ctx *cli.Context) error {
start []byte
)
if ctx.NArg() > 1 {
return fmt.Errorf("Max 1 argument: %v", ctx.Command.ArgsUsage)
return fmt.Errorf("max 1 argument: %v", ctx.Command.ArgsUsage)
}
if ctx.NArg() > 0 {
if d, err := hexutil.Decode(ctx.Args().First()); err != nil {
@@ -332,8 +332,8 @@ func checkStateContent(ctx *cli.Context) error {
)
for it.Next() {
count++
- v := it.Value()
k := it.Key()
+ v := it.Value()
hasher.Reset()
hasher.Write(v)
hasher.Read(got)
1 change: 1 addition & 0 deletions cmd/geth/main.go
@@ -151,6 +151,7 @@ var (
utils.GpoMaxGasPriceFlag,
utils.GpoIgnoreGasPriceFlag,
utils.MinerNotifyFullFlag,
+ utils.IgnoreLegacyReceiptsFlag,
configFileFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags)

26 changes: 22 additions & 4 deletions cmd/geth/snapshot.go
@@ -105,7 +105,7 @@ data, and verifies that all snapshot storage data has a corresponding account.
},
{
Name: "traverse-state",
Usage: "Traverse the state with given root hash for verification",
Usage: "Traverse the state with given root hash and perform quick verification",
ArgsUsage: "<root>",
Action: utils.MigrateFlags(traverseState),
Category: "MISCELLANEOUS COMMANDS",
@@ -121,7 +121,7 @@ It's also usable without snapshot enabled.
},
{
Name: "traverse-rawstate",
Usage: "Traverse the state with given root hash for verification",
Usage: "Traverse the state with given root hash and perform detailed verification",
ArgsUsage: "<root>",
Action: utils.MigrateFlags(traverseRawState),
Category: "MISCELLANEOUS COMMANDS",
@@ -367,6 +367,8 @@ func traverseRawState(ctx *cli.Context) error {
codes int
lastReport time.Time
start = time.Now()
+ hasher = crypto.NewKeccakState()
+ got = make([]byte, 32)
)
accIter := t.NodeIterator(nil)
for accIter.Next(true) {
@@ -376,10 +378,18 @@ func traverseRawState(ctx *cli.Context) error {
// Check the present for non-empty hash node(embedded node doesn't
// have their own hash).
if node != (common.Hash{}) {
- if !rawdb.HasTrieNode(chaindb, node) {
+ blob := rawdb.ReadTrieNode(chaindb, node)
+ if len(blob) == 0 {
log.Error("Missing trie node(account)", "hash", node)
return errors.New("missing account")
}
+ hasher.Reset()
+ hasher.Write(blob)
+ hasher.Read(got)
+ if !bytes.Equal(got, node.Bytes()) {
+ log.Error("Invalid trie node(account)", "hash", node.Hex(), "value", blob)
+ return errors.New("invalid account node")
+ }
}
// If it's a leaf node, yes we are touching an account,
// dig into the storage trie further.
@@ -404,10 +414,18 @@ func traverseRawState(ctx *cli.Context) error {
// Check the present for non-empty hash node(embedded node doesn't
// have their own hash).
if node != (common.Hash{}) {
- if !rawdb.HasTrieNode(chaindb, node) {
+ blob := rawdb.ReadTrieNode(chaindb, node)
+ if len(blob) == 0 {
log.Error("Missing trie node(storage)", "hash", node)
return errors.New("missing storage")
}
+ hasher.Reset()
+ hasher.Write(blob)
+ hasher.Read(got)
+ if !bytes.Equal(got, node.Bytes()) {
+ log.Error("Invalid trie node(storage)", "hash", node.Hex(), "value", blob)
+ return errors.New("invalid storage node")
+ }
}
// Bump the counter if it's leaf node.
if storageIter.Leaf() {
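The traverse commands now verify integrity, not just presence: for every referenced node they load the blob and check that keccak256(blob) equals the hash the node was stored under. A minimal sketch of that check, assuming golang.org/x/crypto/sha3 (geth's crypto.NewKeccakState wraps the same primitive but uses the squeeze/Read API to avoid allocations; names here are illustrative):

package main

import (
	"bytes"
	"errors"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// verifyTrieNode recomputes keccak256 over a stored trie-node blob and
// compares it with the hash the node was indexed under.
func verifyTrieNode(want [32]byte, blob []byte) error {
	if len(blob) == 0 {
		return errors.New("missing node")
	}
	h := sha3.NewLegacyKeccak256()
	h.Write(blob)
	if got := h.Sum(nil); !bytes.Equal(got, want[:]) {
		return fmt.Errorf("invalid node: have %x, want %x", got, want)
	}
	return nil
}

func main() {
	fmt.Println(verifyTrieNode([32]byte{}, []byte{0x01})) // invalid node: ...
}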
1 change: 1 addition & 0 deletions cmd/geth/usage.go
@@ -227,6 +227,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
Flags: []cli.Flag{
utils.SnapshotFlag,
utils.BloomFilterSizeFlag,
+ utils.IgnoreLegacyReceiptsFlag,
cli.HelpFlag,
},
},
4 changes: 4 additions & 0 deletions cmd/utils/flags.go
@@ -566,6 +566,10 @@ var (
Name: "nocompaction",
Usage: "Disables db compaction after import",
}
+ IgnoreLegacyReceiptsFlag = cli.BoolFlag{
+ Name: "ignore-legacy-receipts",
+ Usage: "Geth will start up even if there are legacy receipts in freezer",
+ }
// RPC settings
IPCDisabledFlag = cli.BoolFlag{
Name: "ipcdisable",
18 changes: 12 additions & 6 deletions consensus/clique/clique.go
@@ -180,7 +180,7 @@ type Clique struct {

signer common.Address // Ethereum address of the signing key
signFn SignerFn // Signer function to authorize hashes with
- lock sync.RWMutex // Protects the signer fields
+ lock sync.RWMutex // Protects the signer and proposals fields

// The fields below are for testing only
fakeDiff bool // Skip difficulty verifications
@@ -507,9 +507,8 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header
if err != nil {
return err
}
+ c.lock.RLock()
if number%c.config.Epoch != 0 {
- c.lock.RLock()
-
// Gather all the proposals that make sense voting on
addresses := make([]common.Address, 0, len(c.proposals))
for address, authorize := range c.proposals {
@@ -526,10 +525,14 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header
copy(header.Nonce[:], nonceDropVote)
}
}
- c.lock.RUnlock()
}
+
+ // Copy signer protected by mutex to avoid race condition
+ signer := c.signer
+ c.lock.RUnlock()
+
// Set the correct difficulty
- header.Difficulty = calcDifficulty(snap, c.signer)
+ header.Difficulty = calcDifficulty(snap, signer)

// Ensure the extra data has all its components
if len(header.Extra) < extraVanity {
@@ -666,7 +669,10 @@ func (c *Clique) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64,
if err != nil {
return nil
}
- return calcDifficulty(snap, c.signer)
+ c.lock.RLock()
+ signer := c.signer
+ c.lock.RUnlock()
+ return calcDifficulty(snap, signer)
}

func calcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
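Both hunks above fix the same class of bug: c.signer was read outside the lock that guards it. The fix is the standard copy-under-RLock pattern, sketched here in a self-contained form (types simplified; not geth's API):

package main

import (
	"fmt"
	"sync"
)

type clique struct {
	lock   sync.RWMutex
	signer [20]byte // stands in for common.Address
}

// currentSigner takes a value copy of the protected field inside the
// read lock, so callers can use the copy safely after the lock is gone.
func (c *clique) currentSigner() [20]byte {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.signer
}

func main() {
	var c clique
	fmt.Printf("signer: %x\n", c.currentSigner())
}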
42 changes: 21 additions & 21 deletions consensus/misc/eip1559.go
@@ -58,36 +58,36 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int {
return new(big.Int).SetUint64(params.InitialBaseFee)
}

- var (
- parentGasTarget = parent.GasLimit / params.ElasticityMultiplier
- parentGasTargetBig = new(big.Int).SetUint64(parentGasTarget)
- baseFeeChangeDenominator = new(big.Int).SetUint64(params.BaseFeeChangeDenominator)
- )
+ parentGasTarget := parent.GasLimit / params.ElasticityMultiplier
// If the parent gasUsed is the same as the target, the baseFee remains unchanged.
if parent.GasUsed == parentGasTarget {
return new(big.Int).Set(parent.BaseFee)
}

+ var (
+ num = new(big.Int)
+ denom = new(big.Int)
+ )

if parent.GasUsed > parentGasTarget {
// If the parent block used more gas than its target, the baseFee should increase.
- gasUsedDelta := new(big.Int).SetUint64(parent.GasUsed - parentGasTarget)
- x := new(big.Int).Mul(parent.BaseFee, gasUsedDelta)
- y := x.Div(x, parentGasTargetBig)
- baseFeeDelta := math.BigMax(
- x.Div(y, baseFeeChangeDenominator),
- common.Big1,
- )
+ // max(1, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator)
+ num.SetUint64(parent.GasUsed - parentGasTarget)
+ num.Mul(num, parent.BaseFee)
+ num.Div(num, denom.SetUint64(parentGasTarget))
+ num.Div(num, denom.SetUint64(params.BaseFeeChangeDenominator))
+ baseFeeDelta := math.BigMax(num, common.Big1)

- return x.Add(parent.BaseFee, baseFeeDelta)
+ return num.Add(parent.BaseFee, baseFeeDelta)
} else {
// Otherwise if the parent block used less gas than its target, the baseFee should decrease.
- gasUsedDelta := new(big.Int).SetUint64(parentGasTarget - parent.GasUsed)
- x := new(big.Int).Mul(parent.BaseFee, gasUsedDelta)
- y := x.Div(x, parentGasTargetBig)
- baseFeeDelta := x.Div(y, baseFeeChangeDenominator)
+ // max(0, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator)
+ num.SetUint64(parentGasTarget - parent.GasUsed)
+ num.Mul(num, parent.BaseFee)
+ num.Div(num, denom.SetUint64(parentGasTarget))
+ num.Div(num, denom.SetUint64(params.BaseFeeChangeDenominator))
+ baseFee := num.Sub(parent.BaseFee, num)

- return math.BigMax(
- x.Sub(parent.BaseFee, baseFeeDelta),
- common.Big0,
- )
+ return math.BigMax(baseFee, common.Big0)
}
}
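The rewrite computes the same EIP-1559 rule while reusing two big.Int scratch values instead of allocating at every step. For intuition, a worked example of the rule itself in plain uint64 arithmetic (values are illustrative; the real code uses big.Int because base fees can exceed 64 bits):

package main

import "fmt"

func main() {
	const (
		elasticityMultiplier     = 2 // params.ElasticityMultiplier
		baseFeeChangeDenominator = 8 // params.BaseFeeChangeDenominator
	)
	var (
		parentGasLimit = uint64(30_000_000)
		parentGasUsed  = uint64(22_500_000)    // halfway between target and limit
		parentBaseFee  = uint64(1_000_000_000) // 1 gwei
	)
	target := parentGasLimit / elasticityMultiplier // 15,000,000
	delta := parentGasUsed - target                 // 7,500,000 over target
	// max(1, parentBaseFee * gasUsedDelta / parentGasTarget / denominator)
	feeDelta := parentBaseFee * delta / target / baseFeeChangeDenominator
	if feeDelta < 1 {
		feeDelta = 1
	}
	fmt.Println(parentBaseFee + feeDelta) // 1062500000, i.e. +6.25%
}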
3 changes: 2 additions & 1 deletion contracts/checkpointoracle/oracle.go
@@ -17,7 +17,8 @@
// Package checkpointoracle is a an on-chain light client checkpoint oracle.
package checkpointoracle

- //go:generate go run ../../cmd/abigen --sol contract/oracle.sol --pkg contract --out contract/oracle.go
+ //go:generate solc contract/oracle.sol --combined-json bin,bin-runtime,srcmap,srcmap-runtime,abi,userdoc,devdoc,metadata,hashes --optimize -o ./ --overwrite
+ //go:generate go run ../../cmd/abigen --pkg contract --out contract/oracle.go --combined-json ./combined.json

import (
"errors"
7 changes: 7 additions & 0 deletions core/beacon/types.go
@@ -148,6 +148,13 @@ func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) {
if len(params.ExtraData) > 32 {
return nil, fmt.Errorf("invalid extradata length: %v", len(params.ExtraData))
}
+ if len(params.LogsBloom) != 256 {
+ return nil, fmt.Errorf("invalid logsBloom length: %v", len(params.LogsBloom))
+ }
+ // Check that baseFeePerGas is not negative or too big
+ if params.BaseFeePerGas != nil && (params.BaseFeePerGas.Sign() == -1 || params.BaseFeePerGas.BitLen() > 256) {
+ return nil, fmt.Errorf("invalid baseFeePerGas: %v", params.BaseFeePerGas)
+ }
header := &types.Header{
ParentHash: params.ParentHash,
UncleHash: types.EmptyUncleHash,
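The new checks reject malformed payloads before a header is built: a logs bloom must be exactly 256 bytes, and a base fee must be non-negative and fit in 256 bits. A sketch of the base-fee predicate in isolation (illustrative helper, not the geth function):

package main

import (
	"fmt"
	"math/big"
)

// validBaseFee matches the bounds check above: absent, or non-negative
// and representable in 256 bits.
func validBaseFee(x *big.Int) bool {
	return x == nil || (x.Sign() >= 0 && x.BitLen() <= 256)
}

func main() {
	tooBig := new(big.Int).Lsh(big.NewInt(1), 256) // 2^256, BitLen 257
	fmt.Println(validBaseFee(big.NewInt(7)))  // true
	fmt.Println(validBaseFee(big.NewInt(-1))) // false
	fmt.Println(validBaseFee(tooBig))         // false
}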
48 changes: 33 additions & 15 deletions core/blockchain.go
@@ -1965,8 +1965,8 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
oldChain types.Blocks
commonBlock *types.Block

- deletedTxs types.Transactions
- addedTxs types.Transactions
+ deletedTxs []common.Hash
+ addedTxs []common.Hash

deletedLogs [][]*types.Log
rebirthLogs [][]*types.Log
@@ -1976,7 +1976,9 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// Old chain is longer, gather all transactions and logs as deleted ones
for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
oldChain = append(oldChain, oldBlock)
- deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
+ for _, tx := range oldBlock.Transactions() {
+ deletedTxs = append(deletedTxs, tx.Hash())
+ }

// Collect deleted logs for notification
logs := bc.collectLogs(oldBlock.Hash(), true)
@@ -2006,7 +2008,9 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
}
// Remove an old block as well as stash away a new block
oldChain = append(oldChain, oldBlock)
- deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
+ for _, tx := range oldBlock.Transactions() {
+ deletedTxs = append(deletedTxs, tx.Hash())
+ }

// Collect deleted logs for notification
logs := bc.collectLogs(oldBlock.Hash(), true)
@@ -2025,6 +2029,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
return fmt.Errorf("invalid new chain")
}
}

// Ensure the user sees large reorgs
if len(oldChain) > 0 && len(newChain) > 0 {
logFn := log.Info
@@ -2041,7 +2046,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
} else if len(newChain) > 0 {
// Special case happens in the post merge stage that current head is
// the ancestor of new head while these two blocks are not consecutive
log.Info("Extend chain", "add", len(newChain), "number", newChain[0].NumberU64(), "hash", newChain[0].Hash())
log.Info("Extend chain", "add", len(newChain), "number", newChain[0].Number(), "hash", newChain[0].Hash())
blockReorgAddMeter.Mark(int64(len(newChain)))
} else {
// len(newChain) == 0 && len(oldChain) > 0
@@ -2054,22 +2059,26 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// Insert the block in the canonical way, re-writing history
bc.writeHeadBlock(newChain[i])

- // Collect reborn logs due to chain reorg
- logs := bc.collectLogs(newChain[i].Hash(), false)
- if len(logs) > 0 {
- rebirthLogs = append(rebirthLogs, logs)
- }
// Collect the new added transactions.
- addedTxs = append(addedTxs, newChain[i].Transactions()...)
+ for _, tx := range newChain[i].Transactions() {
+ addedTxs = append(addedTxs, tx.Hash())
+ }
}

// Delete useless indexes right now which includes the non-canonical
// transaction indexes, canonical chain indexes which above the head.
indexesBatch := bc.db.NewBatch()
- for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
- rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash())
+ for _, tx := range types.HashDifference(deletedTxs, addedTxs) {
+ rawdb.DeleteTxLookupEntry(indexesBatch, tx)
}

+ // Delete all hash markers that are not part of the new canonical chain.
+ // Because the reorg function does not handle new chain head, all hash
+ // markers greater than or equal to new chain head should be deleted.
+ number := commonBlock.NumberU64()
+ if len(newChain) > 1 {
+ number = newChain[1].NumberU64()
+ }
- // Delete any canonical number assignments above the new head
- number := bc.CurrentBlock().NumberU64()
for i := number + 1; ; i++ {
hash := rawdb.ReadCanonicalHash(bc.db, i)
if hash == (common.Hash{}) {
@@ -2080,6 +2089,15 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
if err := indexesBatch.Write(); err != nil {
log.Crit("Failed to delete useless indexes", "err", err)
}

+ // Collect the logs
+ for i := len(newChain) - 1; i >= 1; i-- {
+ // Collect reborn logs due to chain reorg
+ logs := bc.collectLogs(newChain[i].Hash(), false)
+ if len(logs) > 0 {
+ rebirthLogs = append(rebirthLogs, logs)
+ }
+ }
// If any logs need to be fired, do it now. In theory we could avoid creating
// this goroutine if there are no events to fire, but realistcally that only
// ever happens if we're reorging empty blocks, which will only happen on idle
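The reorg path now tracks transaction hashes instead of whole transactions, so the cleanup loop uses types.HashDifference rather than types.TxDifference. A plausible sketch of such a set-difference helper (the actual geth implementation may differ in detail):

package main

import "fmt"

type Hash [32]byte

// HashDifference returns every hash in a that does not appear in b,
// preserving the order of a (analogous to types.TxDifference).
func HashDifference(a, b []Hash) []Hash {
	remove := make(map[Hash]struct{}, len(b))
	for _, h := range b {
		remove[h] = struct{}{}
	}
	keep := make([]Hash, 0, len(a))
	for _, h := range a {
		if _, ok := remove[h]; !ok {
			keep = append(keep, h)
		}
	}
	return keep
}

func main() {
	a := []Hash{{1}, {2}, {3}}
	b := []Hash{{2}}
	fmt.Println(len(HashDifference(a, b))) // 2
}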
