diff --git a/config/config_test.go b/config/config_test.go index 1435e49c01..7b35e33b0a 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -101,6 +101,10 @@ func Test_Defaults(t *testing.T) { path: "Sequencer.Finalizer.ResourceExhaustedMarginPct", expectedValue: uint32(10), }, + { + path: "Sequencer.Finalizer.StateRootSyncInterval", + expectedValue: types.NewDuration(3600 * time.Second), + }, { path: "Sequencer.Finalizer.ForcedBatchesL1BlockConfirmations", expectedValue: uint64(64), @@ -127,7 +131,7 @@ func Test_Defaults(t *testing.T) { }, { path: "Sequencer.Finalizer.BatchMaxDeltaTimestamp", - expectedValue: types.NewDuration(10 * time.Second), + expectedValue: types.NewDuration(1800 * time.Second), }, { path: "Sequencer.Finalizer.Metrics.Interval", diff --git a/config/default.go b/config/default.go index 06b34dc45c..6416cb9fab 100644 --- a/config/default.go +++ b/config/default.go @@ -146,12 +146,13 @@ StateConsistencyCheckInterval = "5s" ForcedBatchesCheckInterval = "10s" L1InfoTreeL1BlockConfirmations = 64 L1InfoTreeCheckInterval = "10s" - BatchMaxDeltaTimestamp = "10s" + BatchMaxDeltaTimestamp = "1800s" L2BlockMaxDeltaTimestamp = "3s" ResourceExhaustedMarginPct = 10 + StateRootSyncInterval = "3600s" HaltOnBatchNumber = 0 SequentialBatchSanityCheck = false - SequentialProcessL2Block = true + SequentialProcessL2Block = false [Sequencer.Finalizer.Metrics] Interval = "60m" EnableLog = true diff --git a/config/environments/local/local.node.config.toml b/config/environments/local/local.node.config.toml index 7fdad2a456..436a0d84fa 100644 --- a/config/environments/local/local.node.config.toml +++ b/config/environments/local/local.node.config.toml @@ -101,9 +101,10 @@ StateConsistencyCheckInterval = "5s" BatchMaxDeltaTimestamp = "120s" L2BlockMaxDeltaTimestamp = "3s" ResourceExhaustedMarginPct = 10 + StateRootSyncInterval = "360s" HaltOnBatchNumber = 0 SequentialBatchSanityCheck = false - SequentialProcessL2Block = true + SequentialProcessL2Block = false [Sequencer.Finalizer.Metrics] Interval = "60m" EnableLog = true diff --git a/db/migrations/state/0021.sql b/db/migrations/state/0021.sql index bd982feab1..5e0c527e32 100644 --- a/db/migrations/state/0021.sql +++ b/db/migrations/state/0021.sql @@ -1,25 +1,8 @@ -- +migrate Up - --- the update below fix the wrong receipt TX indexes -WITH map_fix_tx_index AS ( - SELECT t.l2_block_num AS block_num - , t.hash AS tx_hash - , r.tx_index AS current_index - , (ROW_NUMBER() OVER (PARTITION BY t.l2_block_num ORDER BY r.tx_index))-1 AS correct_index - FROM state.receipt r - INNER JOIN state."transaction" t - ON t.hash = r.tx_hash -) -UPDATE state.receipt AS r - SET tx_index = m.correct_index - FROM map_fix_tx_index m - WHERE m.block_num = r.block_num - AND m.tx_hash = r.tx_hash - AND m.current_index = r.tx_index - AND m.current_index != m.correct_index; - +ALTER TABLE state.batch + ADD COLUMN high_reserved_counters JSONB; -- +migrate Down - --- no action is needed, the data fixed by the --- migrate up must remain fixed \ No newline at end of file +ALTER TABLE state.batch + DROP COLUMN high_reserved_counters; + \ No newline at end of file diff --git a/db/migrations/state/0021_test.go b/db/migrations/state/0021_test.go index c936c42e09..512ba55191 100644 --- a/db/migrations/state/0021_test.go +++ b/db/migrations/state/0021_test.go @@ -4,142 +4,61 @@ import ( "database/sql" "testing" - "github.com/0xPolygonHermez/zkevm-node/hex" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" + 
"github.com/stretchr/testify/assert" ) -type migrationTest0021TestCase struct { - Name string - Block migrationTest0021TestCaseBlock -} - -type migrationTest0021TestCaseBlock struct { - Transactions []migrationTest0021TestCaseTransaction -} - -type migrationTest0021TestCaseTransaction struct { - CurrentIndex uint -} - -type migrationTest0021 struct { - TestCases []migrationTest0021TestCase -} +type migrationTest0021 struct{} func (m migrationTest0021) InsertData(db *sql.DB) error { - const addBlock0 = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES (0, now(), '0x0')" - if _, err := db.Exec(addBlock0); err != nil { - return err - } - - const addBatch0 = ` + const insertBatch0 = ` INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, wip) VALUES (0,'0x0000', '0x0000', '0x0000', '0x0000', now(), '0x0000', null, null, true)` - if _, err := db.Exec(addBatch0); err != nil { - return err - } - const addL2Block = "INSERT INTO state.l2block (block_num, block_hash, header, uncles, parent_hash, state_root, received_at, batch_num, created_at) VALUES ($1, $2, '{}', '{}', '0x0', '0x0', now(), 0, now())" - const addTransaction = "INSERT INTO state.transaction (hash, encoded, decoded, l2_block_num, effective_percentage, l2_hash) VALUES ($1, 'ABCDEF', '{}', $2, 255, $1)" - const addReceipt = "INSERT INTO state.receipt (tx_hash, type, post_state, status, cumulative_gas_used, gas_used, effective_gas_price, block_num, tx_index, contract_address) VALUES ($1, 1, null, 1, 1234, 1234, 1, $2, $3, '')" - - txUnique := 0 - for tci, testCase := range m.TestCases { - blockNum := uint64(tci + 1) - blockHash := common.HexToHash(hex.EncodeUint64(blockNum)).String() - if _, err := db.Exec(addL2Block, blockNum, blockHash); err != nil { - return err - } - for _, tx := range testCase.Block.Transactions { - txUnique++ - txHash := common.HexToHash(hex.EncodeUint64(uint64(txUnique))).String() - if _, err := db.Exec(addTransaction, txHash, blockNum); err != nil { - return err - } - if _, err := db.Exec(addReceipt, txHash, blockNum, tx.CurrentIndex); err != nil { - return err - } - } + // insert batch + _, err := db.Exec(insertBatch0) + if err != nil { + return err } return nil } func (m migrationTest0021) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { - const getReceiptsByBlock = "SELECT r.tx_index FROM state.receipt r WHERE r.block_num = $1 ORDER BY r.tx_index" + var result int - for tci := range m.TestCases { - blockNum := uint64(tci + 1) + // Check column high_reserved_counters exists in state.batch table + const getColumn = `SELECT count(*) FROM information_schema.columns WHERE table_name='batch' and column_name='high_reserved_counters'` + row := db.QueryRow(getColumn) + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 1, result) - rows, err := db.Query(getReceiptsByBlock, blockNum) - require.NoError(t, err) + const insertBatch0 = ` + INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, wip, high_reserved_counters) + VALUES (1,'0x0001', '0x0001', '0x0001', '0x0001', now(), '0x0001', null, null, true, '{"Steps": 1890125}')` - var expectedIndex = uint(0) - var txIndex uint - for rows.Next() { - err := rows.Scan(&txIndex) - require.NoError(t, err) - require.Equal(t, expectedIndex, txIndex) - expectedIndex++ - } - } + // insert batch 1 + _, err := db.Exec(insertBatch0) + assert.NoError(t, err) + + 
const insertBatch1 = ` + INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, wip, high_reserved_counters) + VALUES (2,'0x0002', '0x0002', '0x0002', '0x0002', now(), '0x0002', null, null, false, '{"Steps": 1890125}')` + + // insert batch 2 + _, err = db.Exec(insertBatch1) + assert.NoError(t, err) } func (m migrationTest0021) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { - m.RunAssertsAfterMigrationUp(t, db) + var result int + + // Check column high_reserved_counters doesn't exists in state.batch table + const getCheckedColumn = `SELECT count(*) FROM information_schema.columns WHERE table_name='batch' and column_name='high_reserved_counters'` + row := db.QueryRow(getCheckedColumn) + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 0, result) } func TestMigration0021(t *testing.T) { - runMigrationTest(t, 21, migrationTest0021{ - TestCases: []migrationTest0021TestCase{ - { - Name: "single tx with correct index", - Block: migrationTest0021TestCaseBlock{ - Transactions: []migrationTest0021TestCaseTransaction{ - {CurrentIndex: 0}, - }, - }, - }, - { - Name: "multiple txs indexes are correct", - Block: migrationTest0021TestCaseBlock{ - Transactions: []migrationTest0021TestCaseTransaction{ - {CurrentIndex: 0}, - {CurrentIndex: 1}, - {CurrentIndex: 2}, - }, - }, - }, - { - Name: "single tx with wrong tx index", - Block: migrationTest0021TestCaseBlock{ - Transactions: []migrationTest0021TestCaseTransaction{ - {CurrentIndex: 3}, - }, - }, - }, - { - Name: "multiple txs missing 0 index", - Block: migrationTest0021TestCaseBlock{ - Transactions: []migrationTest0021TestCaseTransaction{ - {CurrentIndex: 1}, - {CurrentIndex: 2}, - {CurrentIndex: 3}, - {CurrentIndex: 4}, - }, - }, - }, - { - Name: "multiple has index 0 but also txs index gap", - Block: migrationTest0021TestCaseBlock{ - Transactions: []migrationTest0021TestCaseTransaction{ - {CurrentIndex: 0}, - {CurrentIndex: 2}, - {CurrentIndex: 4}, - {CurrentIndex: 6}, - }, - }, - }, - }, - }) + runMigrationTest(t, 21, migrationTest0021{}) } diff --git a/db/migrations/state/0022.sql b/db/migrations/state/0022.sql index d2955452a5..23819fd91a 100644 --- a/db/migrations/state/0022.sql +++ b/db/migrations/state/0022.sql @@ -1,12 +1,25 @@ -- +migrate Up --- +migrate Up -ALTER TABLE state.exit_root - ADD COLUMN IF NOT EXISTS l1_info_tree_recursive_index BIGINT DEFAULT NULL UNIQUE; -CREATE INDEX IF NOT EXISTS idx_exit_root_l1_info_tree_recursive_index ON state.exit_root (l1_info_tree_recursive_index); +-- the update below fix the wrong receipt TX indexes +WITH map_fix_tx_index AS ( + SELECT t.l2_block_num AS block_num + , t.hash AS tx_hash + , r.tx_index AS current_index + , (ROW_NUMBER() OVER (PARTITION BY t.l2_block_num ORDER BY r.tx_index))-1 AS correct_index + FROM state.receipt r + INNER JOIN state."transaction" t + ON t.hash = r.tx_hash +) +UPDATE state.receipt AS r + SET tx_index = m.correct_index + FROM map_fix_tx_index m + WHERE m.block_num = r.block_num + AND m.tx_hash = r.tx_hash + AND m.current_index = r.tx_index + AND m.current_index != m.correct_index; + -- +migrate Down -ALTER TABLE state.exit_root - DROP COLUMN IF EXISTS l1_info_tree_recursive_index; -DROP INDEX IF EXISTS state.idx_exit_root_l1_info_tree_recursive_index; +-- no action is needed, the data fixed by the +-- migrate up must remain fixed diff --git a/db/migrations/state/0022_test.go b/db/migrations/state/0022_test.go index f7a0f9f59e..155e632079 100644 --- 
a/db/migrations/state/0022_test.go +++ b/db/migrations/state/0022_test.go @@ -3,104 +3,143 @@ package migrations_test import ( "database/sql" "testing" - "time" - "github.com/stretchr/testify/assert" + "github.com/0xPolygonHermez/zkevm-node/hex" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" ) -type migrationTest0022 struct { - migrationBase - - blockHashValue string - mainExitRootValue string - rollupExitRootValue string - globalExitRootValue string - previousBlockHashValue string - l1InfoRootValue string +type migrationTest0022TestCase struct { + Name string + Block migrationTest0022TestCaseBlock } -func (m migrationTest0022) insertBlock(blockNumber uint64, db *sql.DB) error { - const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES ($1, $2, $3)" - if _, err := db.Exec(addBlock, blockNumber, time.Now(), m.blockHashValue); err != nil { - return err - } - return nil +type migrationTest0022TestCaseBlock struct { + Transactions []migrationTest0022TestCaseTransaction } -func (m migrationTest0022) insertRowInOldTable(db *sql.DB, args ...interface{}) error { - sql := ` - INSERT INTO state.exit_root (block_num, "timestamp", mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index) - VALUES ( $1, $2, $3, $4, $5, $6, $7, $8);` - - _, err := db.Exec(sql, args...) - return err +type migrationTest0022TestCaseTransaction struct { + CurrentIndex uint } -func (m migrationTest0022) insertRowInMigratedTable(db *sql.DB, args ...interface{}) error { - sql := ` - INSERT INTO state.exit_root (block_num, "timestamp", mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index, l1_info_tree_recursive_index) - VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9);` - - _, err := db.Exec(sql, args...) 
- return err +type migrationTest0022 struct { + TestCases []migrationTest0022TestCase } func (m migrationTest0022) InsertData(db *sql.DB) error { - var err error - for i := uint64(1); i <= 6; i++ { - if err = m.insertBlock(i, db); err != nil { + const addBlock0 = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES (0, now(), '0x0')" + if _, err := db.Exec(addBlock0); err != nil { + return err + } + + const addBatch0 = ` + INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, wip) + VALUES (0,'0x0000', '0x0000', '0x0000', '0x0000', now(), '0x0000', null, null, true)` + if _, err := db.Exec(addBatch0); err != nil { + return err + } + + const addL2Block = "INSERT INTO state.l2block (block_num, block_hash, header, uncles, parent_hash, state_root, received_at, batch_num, created_at) VALUES ($1, $2, '{}', '{}', '0x0', '0x0', now(), 0, now())" + const addTransaction = "INSERT INTO state.transaction (hash, encoded, decoded, l2_block_num, effective_percentage, l2_hash) VALUES ($1, 'ABCDEF', '{}', $2, 255, $1)" + const addReceipt = "INSERT INTO state.receipt (tx_hash, type, post_state, status, cumulative_gas_used, gas_used, effective_gas_price, block_num, tx_index, contract_address) VALUES ($1, 1, null, 1, 1234, 1234, 1, $2, $3, '')" + + txUnique := 0 + for tci, testCase := range m.TestCases { + blockNum := uint64(tci + 1) + blockHash := common.HexToHash(hex.EncodeUint64(blockNum)).String() + if _, err := db.Exec(addL2Block, blockNum, blockHash); err != nil { return err } + for _, tx := range testCase.Block.Transactions { + txUnique++ + txHash := common.HexToHash(hex.EncodeUint64(uint64(txUnique))).String() + if _, err := db.Exec(addTransaction, txHash, blockNum); err != nil { + return err + } + if _, err := db.Exec(addReceipt, txHash, blockNum, tx.CurrentIndex); err != nil { + return err + } + } } return nil } func (m migrationTest0022) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { - m.AssertNewAndRemovedItemsAfterMigrationUp(t, db) + const getReceiptsByBlock = "SELECT r.tx_index FROM state.receipt r WHERE r.block_num = $1 ORDER BY r.tx_index" - var nilL1InfoTreeIndex *uint = nil - err := m.insertRowInOldTable(db, 1, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, nilL1InfoTreeIndex) - assert.NoError(t, err) + for tci := range m.TestCases { + blockNum := uint64(tci + 1) - err = m.insertRowInOldTable(db, 2, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, uint(1)) - assert.NoError(t, err) + rows, err := db.Query(getReceiptsByBlock, blockNum) + require.NoError(t, err) - err = m.insertRowInMigratedTable(db, 3, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, nilL1InfoTreeIndex, 1) - assert.NoError(t, err) + var expectedIndex = uint(0) + var txIndex uint + for rows.Next() { + err := rows.Scan(&txIndex) + require.NoError(t, err) + require.Equal(t, expectedIndex, txIndex) + expectedIndex++ + } + } } func (m migrationTest0022) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { - m.AssertNewAndRemovedItemsAfterMigrationDown(t, db) - - var nilL1InfoTreeIndex *uint = nil - err := m.insertRowInOldTable(db, 4, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, 
nilL1InfoTreeIndex) - assert.NoError(t, err) - - err = m.insertRowInOldTable(db, 5, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, uint(2)) - assert.NoError(t, err) - - err = m.insertRowInMigratedTable(db, 6, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, nilL1InfoTreeIndex, 2) - assert.Error(t, err) + m.RunAssertsAfterMigrationUp(t, db) } func TestMigration0022(t *testing.T) { - m := migrationTest0022{ - migrationBase: migrationBase{ - newIndexes: []string{ - "idx_exit_root_l1_info_tree_recursive_index", + runMigrationTest(t, 22, migrationTest0022{ + TestCases: []migrationTest0022TestCase{ + { + Name: "single tx with correct index", + Block: migrationTest0022TestCaseBlock{ + Transactions: []migrationTest0022TestCaseTransaction{ + {CurrentIndex: 0}, + }, + }, + }, + { + Name: "multiple txs indexes are correct", + Block: migrationTest0022TestCaseBlock{ + Transactions: []migrationTest0022TestCaseTransaction{ + {CurrentIndex: 0}, + {CurrentIndex: 1}, + {CurrentIndex: 2}, + }, + }, + }, + { + Name: "single tx with wrong tx index", + Block: migrationTest0022TestCaseBlock{ + Transactions: []migrationTest0022TestCaseTransaction{ + {CurrentIndex: 3}, + }, + }, }, - newColumns: []columnMetadata{ - {"state", "exit_root", "l1_info_tree_recursive_index"}, + { + Name: "multiple txs missing 0 index", + Block: migrationTest0022TestCaseBlock{ + Transactions: []migrationTest0022TestCaseTransaction{ + {CurrentIndex: 1}, + {CurrentIndex: 2}, + {CurrentIndex: 3}, + {CurrentIndex: 4}, + }, + }, + }, + { + Name: "multiple has index 0 but also txs index gap", + Block: migrationTest0022TestCaseBlock{ + Transactions: []migrationTest0022TestCaseTransaction{ + {CurrentIndex: 0}, + {CurrentIndex: 2}, + {CurrentIndex: 4}, + {CurrentIndex: 6}, + }, + }, }, }, - - blockHashValue: "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", - mainExitRootValue: "0x83fc198de31e1b2b1a8212d2430fbb7766c13d9ad305637dea3759065606475d", - rollupExitRootValue: "0xadb91a6a1fce56eaea561002bc9a993f4e65a7710bd72f4eee3067cbd73a743c", - globalExitRootValue: "0x5bf4af1a651a2a74b36e6eb208481f94c69fc959f756223dfa49608061937585", - previousBlockHashValue: "0xe865e912b504572a4d80ad018e29797e3c11f00bf9ae2549548a25779c9d7e57", - l1InfoRootValue: "0x2b9484b83c6398033241865b015fb9430eb3e159182a6075d00c924845cc393e", - } - runMigrationTest(t, 22, m) + }) } diff --git a/db/migrations/state/0023.sql b/db/migrations/state/0023.sql index 5a7dc776ac..d2955452a5 100644 --- a/db/migrations/state/0023.sql +++ b/db/migrations/state/0023.sql @@ -1,57 +1,12 @@ -- +migrate Up -CREATE TABLE IF NOT EXISTS state.blob_sequence -( - index BIGINT PRIMARY KEY, - coinbase VARCHAR, - final_acc_input_hash VARCHAR, - first_blob_sequenced BIGINT, - last_blob_sequenced BIGINT, - created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), - received_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), - block_num BIGINT NOT NULL REFERENCES state.block (block_num) ON DELETE CASCADE -); - -comment on column state.blob_sequence.index is 'It is the id of this sequence, this value is internal and incremental'; -comment on column state.blob_sequence.block_num is 'L1 Block where appear this sequence'; -comment on column state.blob_sequence.first_blob_sequenced is 'first (included) blob_inner_num of this sequence (state.blob_inner.blob_inner_num)'; -comment on column state.blob_sequence.first_blob_sequenced 
is 'last (included) blob_inner_num of this sequence (state.blob_inner.blob_inner_num)'; -comment on column state.blob_sequence.received_at is 'time when it was received in node'; -comment on column state.blob_sequence.created_at is 'time when was created on L1 (L1block tstamp)'; - -CREATE TABLE IF NOT EXISTS state.blob_inner_in -( - blob_inner_num BIGINT PRIMARY KEY, - blob_sequence_index BIGINT NOT NULL REFERENCES state.blob_sequence (index) ON DELETE CASCADE, - blob_type VARCHAR, - max_sequence_timestamp TIMESTAMP WITH TIME ZONE, - zk_gas_limit BIGINT, - l1_info_tree_leaf_index BIGINT, - l1_info_tree_root VARCHAR, - blob_data_hash VARCHAR, - updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), - -- if blob_type== blob - blob_type_index BIGINT, - blob_type_z VARCHAR, - blob_type_y VARCHAR, - blob_type_commitment VARCHAR, - blob_type_proof VARCHAR -); - -comment on column state.blob_inner_in.updated_at is 'the creation time is blob_sequence.created_at, this is the last time when was updated (tipically Now() )'; -comment on column state.blob_inner_in.blob_type is 'call_data, blob or forced'; -comment on column state.blob_inner_in.blob_data_hash is 'is the hash of the blobData'; - -CREATE TABLE IF NOT EXISTS state.incoming_batch -( - batch_num BIGINT PRIMARY KEY, - blob_inner_num BIGINT NOT NULL REFERENCES state.blob_inner_in (blob_inner_num) ON DELETE CASCADE, - data BYTEA, - created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), - updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW() -); +-- +migrate Up +ALTER TABLE state.exit_root + ADD COLUMN IF NOT EXISTS l1_info_tree_recursive_index BIGINT DEFAULT NULL UNIQUE; +CREATE INDEX IF NOT EXISTS idx_exit_root_l1_info_tree_recursive_index ON state.exit_root (l1_info_tree_recursive_index); -- +migrate Down -DROP TABLE IF EXISTS state.incoming_batch; -DROP TABLE IF EXISTS state.blob_inner_in; -DROP TABLE IF EXISTS state.blob_sequence; +ALTER TABLE state.exit_root + DROP COLUMN IF EXISTS l1_info_tree_recursive_index; +DROP INDEX IF EXISTS state.idx_exit_root_l1_info_tree_recursive_index; + diff --git a/db/migrations/state/0023_test.go b/db/migrations/state/0023_test.go new file mode 100644 index 0000000000..1dfc555ec9 --- /dev/null +++ b/db/migrations/state/0023_test.go @@ -0,0 +1,106 @@ +package migrations_test + +import ( + "database/sql" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +type migrationTest0023 struct { + migrationBase + + blockHashValue string + mainExitRootValue string + rollupExitRootValue string + globalExitRootValue string + previousBlockHashValue string + l1InfoRootValue string +} + +func (m migrationTest0023) insertBlock(blockNumber uint64, db *sql.DB) error { + const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES ($1, $2, $3)" + if _, err := db.Exec(addBlock, blockNumber, time.Now(), m.blockHashValue); err != nil { + return err + } + return nil +} + +func (m migrationTest0023) insertRowInOldTable(db *sql.DB, args ...interface{}) error { + sql := ` + INSERT INTO state.exit_root (block_num, "timestamp", mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index) + VALUES ( $1, $2, $3, $4, $5, $6, $7, $8);` + + _, err := db.Exec(sql, args...) 
+ return err +} + +func (m migrationTest0023) insertRowInMigratedTable(db *sql.DB, args ...interface{}) error { + sql := ` + INSERT INTO state.exit_root (block_num, "timestamp", mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index, l1_info_tree_recursive_index) + VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9);` + + _, err := db.Exec(sql, args...) + return err +} + +func (m migrationTest0023) InsertData(db *sql.DB) error { + var err error + for i := uint64(1); i <= 6; i++ { + if err = m.insertBlock(i, db); err != nil { + return err + } + } + + return nil +} + +func (m migrationTest0023) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + m.AssertNewAndRemovedItemsAfterMigrationUp(t, db) + + var nilL1InfoTreeIndex *uint = nil + err := m.insertRowInOldTable(db, 1, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, nilL1InfoTreeIndex) + assert.NoError(t, err) + + err = m.insertRowInOldTable(db, 2, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, uint(1)) + assert.NoError(t, err) + + err = m.insertRowInMigratedTable(db, 3, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, nilL1InfoTreeIndex, 1) + assert.NoError(t, err) +} + +func (m migrationTest0023) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + m.AssertNewAndRemovedItemsAfterMigrationDown(t, db) + + var nilL1InfoTreeIndex *uint = nil + err := m.insertRowInOldTable(db, 4, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, nilL1InfoTreeIndex) + assert.NoError(t, err) + + err = m.insertRowInOldTable(db, 5, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, uint(2)) + assert.NoError(t, err) + + err = m.insertRowInMigratedTable(db, 6, time.Now().UTC(), m.mainExitRootValue, m.rollupExitRootValue, m.globalExitRootValue, m.previousBlockHashValue, m.l1InfoRootValue, nilL1InfoTreeIndex, 2) + assert.Error(t, err) +} + +func TestMigration0023(t *testing.T) { + m := migrationTest0023{ + migrationBase: migrationBase{ + newIndexes: []string{ + "idx_exit_root_l1_info_tree_recursive_index", + }, + newColumns: []columnMetadata{ + {"state", "exit_root", "l1_info_tree_recursive_index"}, + }, + }, + + blockHashValue: "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", + mainExitRootValue: "0x83fc198de31e1b2b1a8212d2430fbb7766c13d9ad305637dea3759065606475d", + rollupExitRootValue: "0xadb91a6a1fce56eaea561002bc9a993f4e65a7710bd72f4eee3067cbd73a743c", + globalExitRootValue: "0x5bf4af1a651a2a74b36e6eb208481f94c69fc959f756223dfa49608061937585", + previousBlockHashValue: "0xe865e912b504572a4d80ad018e29797e3c11f00bf9ae2549548a25779c9d7e57", + l1InfoRootValue: "0x2b9484b83c6398033241865b015fb9430eb3e159182a6075d00c924845cc393e", + } + runMigrationTest(t, 23, m) +} diff --git a/db/migrations/state/0024.sql b/db/migrations/state/0024.sql new file mode 100644 index 0000000000..5a7dc776ac --- /dev/null +++ b/db/migrations/state/0024.sql @@ -0,0 +1,57 @@ +-- +migrate Up + +CREATE TABLE IF NOT EXISTS state.blob_sequence +( + index BIGINT PRIMARY KEY, + coinbase VARCHAR, + final_acc_input_hash VARCHAR, + first_blob_sequenced BIGINT, + last_blob_sequenced BIGINT, + created_at TIMESTAMP WITH TIME ZONE NOT 
NULL DEFAULT NOW(), + received_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + block_num BIGINT NOT NULL REFERENCES state.block (block_num) ON DELETE CASCADE +); + +comment on column state.blob_sequence.index is 'It is the id of this sequence, this value is internal and incremental'; +comment on column state.blob_sequence.block_num is 'L1 Block where appear this sequence'; +comment on column state.blob_sequence.first_blob_sequenced is 'first (included) blob_inner_num of this sequence (state.blob_inner.blob_inner_num)'; +comment on column state.blob_sequence.first_blob_sequenced is 'last (included) blob_inner_num of this sequence (state.blob_inner.blob_inner_num)'; +comment on column state.blob_sequence.received_at is 'time when it was received in node'; +comment on column state.blob_sequence.created_at is 'time when was created on L1 (L1block tstamp)'; + +CREATE TABLE IF NOT EXISTS state.blob_inner_in +( + blob_inner_num BIGINT PRIMARY KEY, + blob_sequence_index BIGINT NOT NULL REFERENCES state.blob_sequence (index) ON DELETE CASCADE, + blob_type VARCHAR, + max_sequence_timestamp TIMESTAMP WITH TIME ZONE, + zk_gas_limit BIGINT, + l1_info_tree_leaf_index BIGINT, + l1_info_tree_root VARCHAR, + blob_data_hash VARCHAR, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + -- if blob_type== blob + blob_type_index BIGINT, + blob_type_z VARCHAR, + blob_type_y VARCHAR, + blob_type_commitment VARCHAR, + blob_type_proof VARCHAR +); + +comment on column state.blob_inner_in.updated_at is 'the creation time is blob_sequence.created_at, this is the last time when was updated (tipically Now() )'; +comment on column state.blob_inner_in.blob_type is 'call_data, blob or forced'; +comment on column state.blob_inner_in.blob_data_hash is 'is the hash of the blobData'; + +CREATE TABLE IF NOT EXISTS state.incoming_batch +( + batch_num BIGINT PRIMARY KEY, + blob_inner_num BIGINT NOT NULL REFERENCES state.blob_inner_in (blob_inner_num) ON DELETE CASCADE, + data BYTEA, + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW() +); + +-- +migrate Down +DROP TABLE IF EXISTS state.incoming_batch; +DROP TABLE IF EXISTS state.blob_inner_in; +DROP TABLE IF EXISTS state.blob_sequence; diff --git a/docs/config-file/node-config-doc.html b/docs/config-file/node-config-doc.html index 5ff931bad9..9b7ed0345e 100644 --- a/docs/config-file/node-config-doc.html +++ b/docs/config-file/node-config-doc.html @@ -44,13 +44,15 @@
"300ms"
 

Default: 10Type: integer

ResourceExhaustedMarginPct is the percentage window of the resource left out for the batch to be closed


Default: 64Type: integer

ForcedBatchesL1BlockConfirmations is number of blocks to consider GER final


Default: 64Type: integer

L1InfoTreeL1BlockConfirmations is number of blocks to consider L1InfoRoot final


Default: "10s"Type: string

ForcedBatchesCheckInterval is used by the closing signals manager to wait for its operation


Examples:

"1m"
 
"300ms"
-

Default: "10s"Type: string

L1InfoTreeCheckInterval is the wait time to check if the L1InfoRoot has been updated


Examples:

"1m"
+

Default: "10s"Type: string

L1InfoTreeCheckInterval is the time interval to check if the L1InfoRoot has been updated


Examples:

"1m"
 
"300ms"
-

Default: "10s"Type: string

BatchMaxDeltaTimestamp is the resolution of the timestamp used to close a batch


Examples:

"1m"
+

Default: "30m0s"Type: string

BatchMaxDeltaTimestamp is the resolution of the timestamp used to close a batch


Examples:

"1m"
 
"300ms"
 

Default: "3s"Type: string

L2BlockMaxDeltaTimestamp is the resolution of the timestamp used to close a L2 block


Examples:

"1m"
 
"300ms"
-

Default: 0Type: integer

HaltOnBatchNumber specifies the batch number where the Sequencer will stop to process more transactions and generate new batches.
The Sequencer will halt after it closes the batch equal to this number


Default: falseType: boolean

SequentialBatchSanityCheck indicates if the reprocess of a closed batch (sanity check) must be done in a
sequential way (instead than in parallel)


Default: trueType: boolean

SequentialProcessL2Block indicates if the processing of a L2 Block must be done in the same finalizer go func instead
in the processPendingL2Blocks go func


Metrics is the config for the sequencer metrics
Default: "1h0m0s"Type: string

Interval is the interval of time to calculate sequencer metrics


Examples:

"1m"
+

Default: "1h0m0s"Type: string

StateRootSyncInterval indicates how often the stateroot generated by the L2 block process will be synchronized with
the stateroot used in the tx-by-tx execution


Examples:

"1m"
+
"300ms"
+

Default: 0Type: integer

HaltOnBatchNumber specifies the batch number where the Sequencer will stop to process more transactions and generate new batches.
The Sequencer will halt after it closes the batch equal to this number


Default: falseType: boolean

SequentialBatchSanityCheck indicates if the reprocess of a closed batch (sanity check) must be done in a
sequential way (instead than in parallel)


Default: falseType: boolean

SequentialProcessL2Block indicates if the processing of a L2 Block must be done in the same finalizer go func instead
in the processPendingL2Blocks go func


Metrics is the config for the sequencer metrics
Default: "1h0m0s"Type: string

Interval is the interval of time to calculate sequencer metrics


Examples:

"1m"
 
"300ms"
 

Default: trueType: boolean

EnableLog is a flag to enable/disable metrics logs


StreamServerCfg is the config for the stream server
Default: 0Type: integer

Port to listen on


Default: ""Type: string

Filename of the binary data file


Default: 0Type: integer

Version of the binary data file


Default: 0Type: integer

ChainID is the chain ID


Default: falseType: boolean

Enabled is a flag to enable/disable the data streamer


Log is the log configuration
Default: ""Type: enum (of string)

Must be one of:

  • "production"
  • "development"

Default: ""Type: enum (of string)

Must be one of:

  • "debug"
  • "info"
  • "warn"
  • "error"
  • "dpanic"
  • "panic"
  • "fatal"

Type: array of string

Each item of this array must be:


Default: 0Type: integer

UpgradeEtrogBatchNumber is the batch number of the upgrade etrog


Configuration of the sequence sender service
Default: "5s"Type: string

WaitPeriodSendSequence is the time the sequencer waits until
trying to send a sequence to L1


Examples:

"1m"
 
"300ms"
diff --git a/docs/config-file/node-config-doc.md b/docs/config-file/node-config-doc.md
index efa2a0abb5..157e6051aa 100644
--- a/docs/config-file/node-config-doc.md
+++ b/docs/config-file/node-config-doc.md
@@ -2098,6 +2098,7 @@ StateConsistencyCheckInterval="5s"
 | - [L1InfoTreeCheckInterval](#Sequencer_Finalizer_L1InfoTreeCheckInterval )                     | No      | string  | No         | -          | Duration                                                                                                                                                                                                      |
 | - [BatchMaxDeltaTimestamp](#Sequencer_Finalizer_BatchMaxDeltaTimestamp )                       | No      | string  | No         | -          | Duration                                                                                                                                                                                                      |
 | - [L2BlockMaxDeltaTimestamp](#Sequencer_Finalizer_L2BlockMaxDeltaTimestamp )                   | No      | string  | No         | -          | Duration                                                                                                                                                                                                      |
+| - [StateRootSyncInterval](#Sequencer_Finalizer_StateRootSyncInterval )                         | No      | string  | No         | -          | Duration                                                                                                                                                                                                      |
 | - [HaltOnBatchNumber](#Sequencer_Finalizer_HaltOnBatchNumber )                                 | No      | integer | No         | -          | HaltOnBatchNumber specifies the batch number where the Sequencer will stop to process more transactions and generate new batches.
The Sequencer will halt after it closes the batch equal to this number | | - [SequentialBatchSanityCheck](#Sequencer_Finalizer_SequentialBatchSanityCheck ) | No | boolean | No | - | SequentialBatchSanityCheck indicates if the reprocess of a closed batch (sanity check) must be done in a
sequential way (instead than in parallel) | | - [SequentialProcessL2Block](#Sequencer_Finalizer_SequentialProcessL2Block ) | No | boolean | No | - | SequentialProcessL2Block indicates if the processing of a L2 Block must be done in the same finalizer go func instead
in the processPendingL2Blocks go func | @@ -2231,7 +2232,7 @@ ForcedBatchesCheckInterval="10s" **Default:** `"10s"` -**Description:** L1InfoTreeCheckInterval is the wait time to check if the L1InfoRoot has been updated +**Description:** L1InfoTreeCheckInterval is the time interval to check if the L1InfoRoot has been updated **Examples:** @@ -2255,7 +2256,7 @@ L1InfoTreeCheckInterval="10s" **Type:** : `string` -**Default:** `"10s"` +**Default:** `"30m0s"` **Description:** BatchMaxDeltaTimestamp is the resolution of the timestamp used to close a batch @@ -2269,10 +2270,10 @@ L1InfoTreeCheckInterval="10s" "300ms" ``` -**Example setting the default value** ("10s"): +**Example setting the default value** ("30m0s"): ``` [Sequencer.Finalizer] -BatchMaxDeltaTimestamp="10s" +BatchMaxDeltaTimestamp="30m0s" ``` #### 10.7.9. `Sequencer.Finalizer.L2BlockMaxDeltaTimestamp` @@ -2301,7 +2302,34 @@ BatchMaxDeltaTimestamp="10s" L2BlockMaxDeltaTimestamp="3s" ``` -#### 10.7.10. `Sequencer.Finalizer.HaltOnBatchNumber` +#### 10.7.10. `Sequencer.Finalizer.StateRootSyncInterval` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"1h0m0s"` + +**Description:** StateRootSyncInterval indicates how often the stateroot generated by the L2 block process will be synchronized with +the stateroot used in the tx-by-tx execution + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("1h0m0s"): +``` +[Sequencer.Finalizer] +StateRootSyncInterval="1h0m0s" +``` + +#### 10.7.11. `Sequencer.Finalizer.HaltOnBatchNumber` **Type:** : `integer` @@ -2316,7 +2344,7 @@ The Sequencer will halt after it closes the batch equal to this number HaltOnBatchNumber=0 ``` -#### 10.7.11. `Sequencer.Finalizer.SequentialBatchSanityCheck` +#### 10.7.12. `Sequencer.Finalizer.SequentialBatchSanityCheck` **Type:** : `boolean` @@ -2331,22 +2359,22 @@ sequential way (instead than in parallel) SequentialBatchSanityCheck=false ``` -#### 10.7.12. `Sequencer.Finalizer.SequentialProcessL2Block` +#### 10.7.13. `Sequencer.Finalizer.SequentialProcessL2Block` **Type:** : `boolean` -**Default:** `true` +**Default:** `false` **Description:** SequentialProcessL2Block indicates if the processing of a L2 Block must be done in the same finalizer go func instead in the processPendingL2Blocks go func -**Example setting the default value** (true): +**Example setting the default value** (false): ``` [Sequencer.Finalizer] -SequentialProcessL2Block=true +SequentialProcessL2Block=false ``` -#### 10.7.13. `[Sequencer.Finalizer.Metrics]` +#### 10.7.14. `[Sequencer.Finalizer.Metrics]` **Type:** : `object` **Description:** Metrics is the config for the sequencer metrics @@ -2356,7 +2384,7 @@ SequentialProcessL2Block=true | - [Interval](#Sequencer_Finalizer_Metrics_Interval ) | No | string | No | - | Duration | | - [EnableLog](#Sequencer_Finalizer_Metrics_EnableLog ) | No | boolean | No | - | EnableLog is a flag to enable/disable metrics logs | -##### 10.7.13.1. `Sequencer.Finalizer.Metrics.Interval` +##### 10.7.14.1. `Sequencer.Finalizer.Metrics.Interval` **Title:** Duration @@ -2382,7 +2410,7 @@ SequentialProcessL2Block=true Interval="1h0m0s" ``` -##### 10.7.13.2. `Sequencer.Finalizer.Metrics.EnableLog` +##### 10.7.14.2. 
`Sequencer.Finalizer.Metrics.EnableLog` **Type:** : `boolean` diff --git a/docs/config-file/node-config-schema.json b/docs/config-file/node-config-schema.json index 50ec96a3ff..8bd7323c07 100644 --- a/docs/config-file/node-config-schema.json +++ b/docs/config-file/node-config-schema.json @@ -830,7 +830,7 @@ "L1InfoTreeCheckInterval": { "type": "string", "title": "Duration", - "description": "L1InfoTreeCheckInterval is the wait time to check if the L1InfoRoot has been updated", + "description": "L1InfoTreeCheckInterval is the time interval to check if the L1InfoRoot has been updated", "default": "10s", "examples": [ "1m", @@ -841,7 +841,7 @@ "type": "string", "title": "Duration", "description": "BatchMaxDeltaTimestamp is the resolution of the timestamp used to close a batch", - "default": "10s", + "default": "30m0s", "examples": [ "1m", "300ms" @@ -857,6 +857,16 @@ "300ms" ] }, + "StateRootSyncInterval": { + "type": "string", + "title": "Duration", + "description": "StateRootSyncInterval indicates how often the stateroot generated by the L2 block process will be synchronized with\nthe stateroot used in the tx-by-tx execution", + "default": "1h0m0s", + "examples": [ + "1m", + "300ms" + ] + }, "HaltOnBatchNumber": { "type": "integer", "description": "HaltOnBatchNumber specifies the batch number where the Sequencer will stop to process more transactions and generate new batches.\nThe Sequencer will halt after it closes the batch equal to this number", @@ -870,7 +880,7 @@ "SequentialProcessL2Block": { "type": "boolean", "description": "SequentialProcessL2Block indicates if the processing of a L2 Block must be done in the same finalizer go func instead\nin the processPendingL2Blocks go func", - "default": true + "default": false }, "Metrics": { "properties": { diff --git a/event/event.go b/event/event.go index e6a72799ce..6e486e21ad 100644 --- a/event/event.go +++ b/event/event.go @@ -50,6 +50,8 @@ const ( EventID_ReservedZKCountersOverflow EventID = "RESERVED ZKCOUNTERS OVERFLOW" // EventID_InvalidInfoRoot is triggered when an invalid l1InfoRoot was synced EventID_InvalidInfoRoot EventID = "INVALID INFOROOT" + // EventID_L2BlockReorg is triggered when a L2 block reorg has happened in the sequencer + EventID_L2BlockReorg EventID = "L2 BLOCK REORG" // Source_Node is the source of the event Source_Node Source = "node" diff --git a/go.mod b/go.mod index 299a5b0161..dfa30ee85b 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/0xPolygonHermez/zkevm-node go 1.21 require ( - github.com/0xPolygonHermez/zkevm-data-streamer v0.2.2 + github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3-0.20240426122934-6f47d2485fc1 github.com/didip/tollbooth/v6 v6.1.2 github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 github.com/ethereum/go-ethereum v1.13.14 diff --git a/go.sum b/go.sum index c863b6bb2c..0a6f1debc2 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.2 h1:XRMTk+W6vtJVGVjuEznfWyNt7HkRkkuSmlN5Y6p60Sc= -github.com/0xPolygonHermez/zkevm-data-streamer v0.2.2/go.mod h1:0QkAXcFa92mFJrCbN3UPUJGJYes851yEgYHLONnaosE= +github.com/0xPolygonHermez/zkevm-data-streamer 
v0.2.3-0.20240426122934-6f47d2485fc1 h1:4wbCJOGcZ8BTuOfNFrcZ1cAVfTWaX1W9EYHaDx3imLc= +github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3-0.20240426122934-6f47d2485fc1/go.mod h1:0QkAXcFa92mFJrCbN3UPUJGJYes851yEgYHLONnaosE= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= diff --git a/sequencer/addrqueue.go b/sequencer/addrqueue.go index 9c0d8d996e..3b2d4847c9 100644 --- a/sequencer/addrqueue.go +++ b/sequencer/addrqueue.go @@ -121,22 +121,25 @@ func (a *addrQueue) IsEmpty() bool { } // deleteTx deletes the tx from the addrQueue -func (a *addrQueue) deleteTx(txHash common.Hash) (deletedReadyTx *TxTracker) { +func (a *addrQueue) deleteTx(txHash common.Hash) (deletedTx *TxTracker, isReady bool) { txHashStr := txHash.String() if (a.readyTx != nil) && (a.readyTx.HashStr == txHashStr) { log.Infof("deleting readyTx %s from addrQueue %s", txHashStr, a.fromStr) prevReadyTx := a.readyTx a.readyTx = nil - return prevReadyTx + return prevReadyTx, true } else { + var deletedTx *TxTracker for _, txTracker := range a.notReadyTxs { if txTracker.HashStr == txHashStr { + deletedTx = txTracker log.Infof("deleting notReadyTx %s from addrQueue %s", txHashStr, a.fromStr) delete(a.notReadyTxs, txTracker.Nonce) + break } } - return nil + return deletedTx, false } } @@ -158,6 +161,22 @@ func (a *addrQueue) deletePendingTxToStore(txHash common.Hash) { } } +func (a *addrQueue) getTransactions() []*TxTracker { + // TODO: Add test for this function + + txsList := []*TxTracker{} + + if a.readyTx != nil { + txsList = append(txsList, a.readyTx) + } + + for _, tx := range a.notReadyTxs { + txsList = append(txsList, tx) + } + + return txsList +} + // updateCurrentNonceBalance updates the nonce and balance of the addrQueue and updates the ready and notReady txs func (a *addrQueue) updateCurrentNonceBalance(nonce *uint64, balance *big.Int) (newReadyTx, prevReadyTx *TxTracker, toDelete []*TxTracker) { var oldReadyTx *TxTracker = nil @@ -179,7 +198,7 @@ func (a *addrQueue) updateCurrentNonceBalance(nonce *uint64, balance *big.Int) ( } } for _, txTracker := range txsToDelete { - log.Infof("deleting notReadyTx with nonce %d from addrQueue %s", txTracker.Nonce, a.fromStr) + log.Infof("deleting notReadyTx with nonce %d from addrQueue %s, reason: %s", txTracker.Nonce, a.fromStr, *txTracker.FailedReason) delete(a.notReadyTxs, txTracker.Nonce) } } diff --git a/sequencer/addrqueue_test.go b/sequencer/addrqueue_test.go index d39ce5a356..a04e0ee793 100644 --- a/sequencer/addrqueue_test.go +++ b/sequencer/addrqueue_test.go @@ -164,11 +164,11 @@ func TestAddrQueue(t *testing.T) { t.Run("Delete readyTx 0x01", func(t *testing.T) { tc := addTxTestCases[2] tx := newTestTxTracker(tc.hash, tc.nonce, tc.gasPrice, tc.cost) - deltx := addr.deleteTx(tx.Hash) + deltx, isReady := addr.deleteTx(tx.Hash) if !(addr.readyTx == nil) { t.Fatalf("Error readyTx not nil. Expected=%s, Actual=%s", "", addr.readyTx.HashStr) } - if !(deltx.HashStr == tx.HashStr) { + if !isReady || !(deltx.HashStr == tx.HashStr) { t.Fatalf("Error returning deletedReadyTx. 
Expected=%s, Actual=%s", tx.HashStr, deltx.HashStr) } }) diff --git a/sequencer/batch.go b/sequencer/batch.go index 26fda94aa4..ecaa93aca7 100644 --- a/sequencer/batch.go +++ b/sequencer/batch.go @@ -7,30 +7,32 @@ import ( "time" "github.com/0xPolygonHermez/zkevm-node/event" - "github.com/0xPolygonHermez/zkevm-node/hex" "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" stateMetrics "github.com/0xPolygonHermez/zkevm-node/state/metrics" "github.com/ethereum/go-ethereum/common" + "github.com/jackc/pgx/v4" ) // Batch represents a wip or processed batch. type Batch struct { - batchNumber uint64 - coinbase common.Address - timestamp time.Time - initialStateRoot common.Hash // initial stateRoot of the batch - imStateRoot common.Hash // intermediate stateRoot when processing tx-by-tx - finalStateRoot common.Hash // final stateroot of the batch when a L2 block is processed - countOfTxs int - countOfL2Blocks int - imRemainingResources state.BatchResources // remaining batch resources when processing tx-by-tx - finalRemainingResources state.BatchResources // remaining batch resources when a L2 block is processed - closingReason state.ClosingReason + batchNumber uint64 + coinbase common.Address + timestamp time.Time + initialStateRoot common.Hash // initial stateRoot of the batch + imStateRoot common.Hash // intermediate stateRoot when processing tx-by-tx + finalStateRoot common.Hash // final stateroot of the batch when a L2 block is processed + countOfTxs int + countOfL2Blocks int + imRemainingResources state.BatchResources // remaining batch resources when processing tx-by-tx + imHighReservedZKCounters state.ZKCounters + finalRemainingResources state.BatchResources // remaining batch resources when a L2 block is processed + finalHighReservedZKCounters state.ZKCounters + closingReason state.ClosingReason } -func (w *Batch) isEmpty() bool { - return w.countOfL2Blocks == 0 +func (b *Batch) isEmpty() bool { + return b.countOfL2Blocks == 0 } // processBatchesPendingtoCheck performs a sanity check for batches closed but pending to be checked @@ -78,23 +80,25 @@ func (f *finalizer) setWIPBatch(ctx context.Context, wipStateBatch *state.Batch) wipStateBatchCountOfTxs = wipStateBatchCountOfTxs + len(rawBlock.Transactions) } - remainingResources := getMaxRemainingResources(f.batchConstraints) + remainingResources := getMaxBatchResources(f.batchConstraints) overflow, overflowResource := remainingResources.Sub(wipStateBatch.Resources) if overflow { - return nil, fmt.Errorf("failed to subtract used resources when setting the WIP batch to the state batch %d, overflow resource: %s", wipStateBatch.BatchNumber, overflowResource) + return nil, fmt.Errorf("failed to subtract used resources when setting the wip batch to the state batch %d, overflow resource: %s", wipStateBatch.BatchNumber, overflowResource) } wipBatch := &Batch{ - batchNumber: wipStateBatch.BatchNumber, - coinbase: wipStateBatch.Coinbase, - imStateRoot: wipStateBatch.StateRoot, - initialStateRoot: prevStateBatch.StateRoot, - finalStateRoot: wipStateBatch.StateRoot, - timestamp: wipStateBatch.Timestamp, - countOfL2Blocks: len(wipStateBatchBlocks.Blocks), - countOfTxs: wipStateBatchCountOfTxs, - imRemainingResources: remainingResources, - finalRemainingResources: remainingResources, + batchNumber: wipStateBatch.BatchNumber, + coinbase: wipStateBatch.Coinbase, + imStateRoot: wipStateBatch.StateRoot, + initialStateRoot: prevStateBatch.StateRoot, + finalStateRoot: wipStateBatch.StateRoot, + timestamp: 
wipStateBatch.Timestamp, + countOfL2Blocks: len(wipStateBatchBlocks.Blocks), + countOfTxs: wipStateBatchCountOfTxs, + imRemainingResources: remainingResources, + finalRemainingResources: remainingResources, + imHighReservedZKCounters: wipStateBatch.HighReservedZKCounters, + finalHighReservedZKCounters: wipStateBatch.HighReservedZKCounters, } return wipBatch, nil @@ -126,22 +130,52 @@ func (f *finalizer) initWIPBatch(ctx context.Context) { if lastStateBatch.BatchNumber+1 == f.cfg.HaltOnBatchNumber { f.Halt(ctx, fmt.Errorf("finalizer reached stop sequencer on batch number: %d", f.cfg.HaltOnBatchNumber), false) } - - f.wipBatch, err = f.openNewWIPBatch(ctx, lastStateBatch.BatchNumber+1, lastStateBatch.StateRoot) - if err != nil { - log.Fatalf("failed to open new wip batch, error: %v", err) - } - } else { /// if it's not closed, it is the wip state batch, set it as wip batch in the finalizer + f.wipBatch = f.openNewWIPBatch(lastStateBatch.BatchNumber+1, lastStateBatch.StateRoot) + f.pipBatch = nil + f.sipBatch = nil + } else { /// if it's not closed, it is the wip/pip/sip batch f.wipBatch, err = f.setWIPBatch(ctx, lastStateBatch) if err != nil { log.Fatalf("failed to set wip batch, error: %v", err) } + f.pipBatch = f.wipBatch + f.sipBatch = f.wipBatch } log.Infof("initial batch: %d, initialStateRoot: %s, stateRoot: %s, coinbase: %s", f.wipBatch.batchNumber, f.wipBatch.initialStateRoot, f.wipBatch.finalStateRoot, f.wipBatch.coinbase) } +func (f *finalizer) processL2BlockReorg(ctx context.Context) error { + f.waitPendingL2Blocks() + + if f.sipBatch != nil && f.sipBatch.batchNumber != f.wipBatch.batchNumber { + // If the sip batch is the previous to the current wip batch and it's still open these means that the L2 block that caused + // the reorg is the first L2 block of the wip batch, therefore we need to close sip batch before to continue. 
+ // If we don't close the sip batch the initWIPBatch function will load the sip batch as the initial one and when trying to reprocess + // the first tx reorged we can have a batch resource overflow (if we have closed the sip batch for this reason) and we will return + // the reorged tx to the worker (calling UpdateTxZKCounters) missing the order in which we need to reprocess the reorged txs + + err := f.finalizeSIPBatch(ctx) + if err != nil { + return fmt.Errorf("error finalizing sip batch, error: %v", err) + } + } + + f.workerIntf.RestoreTxsPendingToStore(ctx) + + f.initWIPBatch(ctx) + + f.initWIPL2Block(ctx) + + // Since when processing the L2 block reorg we sync the state root we can reset next state root syncing + f.scheduleNextStateRootSync() + + f.l2BlockReorg.Store(false) + + return nil +} + // finalizeWIPBatch closes the current batch and opens a new one, potentially processing forced batches between the batch is closed and the resulting new empty batch func (f *finalizer) finalizeWIPBatch(ctx context.Context, closeReason state.ClosingReason) { prevTimestamp := f.wipL2Block.timestamp @@ -154,7 +188,7 @@ func (f *finalizer) finalizeWIPBatch(ctx context.Context, closeReason state.Clos err := f.closeAndOpenNewWIPBatch(ctx, closeReason) if err != nil { - f.Halt(ctx, fmt.Errorf("failed to create new WIP batch, error: %v", err), true) + f.Halt(ctx, fmt.Errorf("failed to create new wip batch, error: %v", err), true) } // If we have closed the wipL2Block then we open a new one @@ -163,88 +197,126 @@ func (f *finalizer) finalizeWIPBatch(ctx context.Context, closeReason state.Clos } } +// finalizeSIPBatch closes the current store-in-progress batch +func (f *finalizer) finalizeSIPBatch(ctx context.Context) error { + dbTx, err := f.stateIntf.BeginStateTransaction(ctx) + if err != nil { + return fmt.Errorf("error creating db transaction to close sip batch %d, error: %v", f.sipBatch.batchNumber, err) + } + + // Close sip batch (close in statedb) + err = f.closeSIPBatch(ctx, dbTx) + if err != nil { + return fmt.Errorf("failed to close sip batch %d, error: %v", f.sipBatch.batchNumber, err) + } + + if err != nil { + rollbackErr := dbTx.Rollback(ctx) + if rollbackErr != nil { + return fmt.Errorf("error when rollback db transaction to close sip batch %d, error: %v", f.sipBatch.batchNumber, rollbackErr) + } + return err + } + + err = dbTx.Commit(ctx) + if err != nil { + return fmt.Errorf("error when commit db transaction to close sip batch %d, error: %v", f.sipBatch.batchNumber, err) + } + + return nil +} + // closeAndOpenNewWIPBatch closes the current batch and opens a new one, potentially processing forced batches between the batch is closed and the resulting new wip batch func (f *finalizer) closeAndOpenNewWIPBatch(ctx context.Context, closeReason state.ClosingReason) error { f.nextForcedBatchesMux.Lock() processForcedBatches := len(f.nextForcedBatches) > 0 f.nextForcedBatchesMux.Unlock() - // If we will process forced batches after we close the wip batch then we must close the current wip L2 block, - // since the processForcedBatches function needs to create new L2 blocks (cannot "reuse" the current wip L2 block if it's empty) + f.wipBatch.closingReason = closeReason + + var lastStateRoot common.Hash + + //TODO: review forced batches implementation since is not good "idea" to check here for forced batches, maybe is better to do it on finalizeBatches loop if processForcedBatches { + // If we have reach the time to sync stateroot or we will process forced batches we must close the current wip L2 
block and wip batch f.closeWIPL2Block(ctx) - } + // We need to wait that all pending L2 blocks are processed and stored + f.waitPendingL2Blocks() - // Wait until all L2 blocks are processed by the executor - startWait := time.Now() - f.pendingL2BlocksToProcessWG.Wait() - elapsed := time.Since(startWait) - log.Debugf("waiting for pending L2 blocks to be processed took: %v", elapsed) + lastStateRoot = f.sipBatch.finalStateRoot - // Wait until all L2 blocks are store - startWait = time.Now() - f.pendingL2BlocksToStoreWG.Wait() - log.Debugf("waiting for pending L2 blocks to be stored took: %v", time.Since(startWait)) + err := f.finalizeSIPBatch(ctx) + if err != nil { + return fmt.Errorf("error finalizing sip batch %d when processing forced batches, error: %v", f.sipBatch.batchNumber, err) + } + } else { + lastStateRoot = f.wipBatch.imStateRoot + } - f.wipBatch.closingReason = closeReason + // Close the wip batch. After will close them f.wipBatch will be nil, therefore we store in local variables the info we need from the f.wipBatch + lastBatchNumber := f.wipBatch.batchNumber - // Close the wip batch - var err error - err = f.closeWIPBatch(ctx) - if err != nil { - return fmt.Errorf("failed to close batch, error: %v", err) - } + f.closeWIPBatch(ctx) - log.Infof("batch %d closed, closing reason: %s", f.wipBatch.batchNumber, closeReason) + if lastBatchNumber+1 == f.cfg.HaltOnBatchNumber { + f.waitPendingL2Blocks() - // Reprocess full batch as sanity check - if f.cfg.SequentialBatchSanityCheck { - // Do the full batch reprocess now - _, _ = f.batchSanityCheck(ctx, f.wipBatch.batchNumber, f.wipBatch.initialStateRoot, f.wipBatch.finalStateRoot) - } else { - // Do the full batch reprocess in parallel - go func() { - _, _ = f.batchSanityCheck(ctx, f.wipBatch.batchNumber, f.wipBatch.initialStateRoot, f.wipBatch.finalStateRoot) - }() - } + // We finalize the current sip batch + err := f.finalizeSIPBatch(ctx) + if err != nil { + return fmt.Errorf("error finalizing sip batch %d when halting on batch %d", f.sipBatch.batchNumber, f.cfg.HaltOnBatchNumber) + } - if f.wipBatch.batchNumber+1 == f.cfg.HaltOnBatchNumber { f.Halt(ctx, fmt.Errorf("finalizer reached stop sequencer on batch number: %d", f.cfg.HaltOnBatchNumber), false) } - // Metadata for the next batch - stateRoot := f.wipBatch.finalStateRoot - lastBatchNumber := f.wipBatch.batchNumber - // Process forced batches if processForcedBatches { - lastBatchNumber, stateRoot = f.processForcedBatches(ctx, lastBatchNumber, stateRoot) - // We must init/reset the wip L2 block from the state since processForcedBatches can created new L2 blocks - f.initWIPL2Block(ctx) + lastBatchNumber, lastStateRoot = f.processForcedBatches(ctx, lastBatchNumber, lastStateRoot) } - f.wipBatch, err = f.openNewWIPBatch(ctx, lastBatchNumber+1, stateRoot) - if err != nil { - return fmt.Errorf("failed to open new wip batch, error: %v", err) - } + f.wipBatch = f.openNewWIPBatch(lastBatchNumber+1, lastStateRoot) - if f.wipL2Block != nil { + if processForcedBatches { + // We need to init/reset the wip L2 block in case we have processed forced batches + f.initWIPL2Block(ctx) + } else if f.wipL2Block != nil { + // If we are "reusing" the wip L2 block because it's empty we assign it to the new wip batch f.wipBatch.imStateRoot = f.wipL2Block.imStateRoot - // Subtract the WIP L2 block used resources to batch - overflow, overflowResource := f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: f.wipL2Block.usedZKCounters, Bytes: f.wipL2Block.bytes}) + f.wipL2Block.batch = 
f.wipBatch + + // We subtract the wip L2 block used resources to the new wip batch + overflow, overflowResource := f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: f.wipL2Block.usedZKCountersOnNew, Bytes: f.wipL2Block.bytes}) if overflow { return fmt.Errorf("failed to subtract L2 block [%d] used resources to new wip batch %d, overflow resource: %s", f.wipL2Block.trackingNum, f.wipBatch.batchNumber, overflowResource) } } - log.Infof("new WIP batch %d", f.wipBatch.batchNumber) + log.Infof("new wip batch %d", f.wipBatch.batchNumber) return nil } // openNewWIPBatch opens a new batch in the state and returns it as WipBatch -func (f *finalizer) openNewWIPBatch(ctx context.Context, batchNumber uint64, stateRoot common.Hash) (*Batch, error) { +func (f *finalizer) openNewWIPBatch(batchNumber uint64, stateRoot common.Hash) *Batch { + maxRemainingResources := getMaxBatchResources(f.batchConstraints) + + return &Batch{ + batchNumber: batchNumber, + coinbase: f.sequencerAddress, + initialStateRoot: stateRoot, + imStateRoot: stateRoot, + finalStateRoot: stateRoot, + timestamp: now(), + imRemainingResources: maxRemainingResources, + finalRemainingResources: maxRemainingResources, + closingReason: state.EmptyClosingReason, + } +} + +// insertSIPBatch inserts a new state-in-progress batch in the state db +func (f *finalizer) insertSIPBatch(ctx context.Context, batchNumber uint64, stateRoot common.Hash, dbTx pgx.Tx) error { // open next batch newStateBatch := state.Batch{ BatchNumber: batchNumber, @@ -255,82 +327,83 @@ func (f *finalizer) openNewWIPBatch(ctx context.Context, batchNumber uint64, sta LocalExitRoot: state.ZeroHash, } - dbTx, err := f.stateIntf.BeginStateTransaction(ctx) - if err != nil { - return nil, fmt.Errorf("failed to begin state transaction to open batch, error: %v", err) - } - // OpenBatch opens a new wip batch in the state - err = f.stateIntf.OpenWIPBatch(ctx, newStateBatch, dbTx) + //TODO: rename OpenWipBatch to InsertBatch + err := f.stateIntf.OpenWIPBatch(ctx, newStateBatch, dbTx) if err != nil { - if rollbackErr := dbTx.Rollback(ctx); rollbackErr != nil { - return nil, fmt.Errorf("failed to rollback due to error when open a new wip batch, rollback error: %v, error: %v", rollbackErr, err) - } - return nil, fmt.Errorf("failed to open new wip batch, error: %v", err) - } - - if err := dbTx.Commit(ctx); err != nil { - return nil, fmt.Errorf("failed to commit database transaction for opening a wip batch, error: %v", err) + return fmt.Errorf("failed to insert new batch in state db, error: %v", err) } // Send batch bookmark to the datastream f.DSSendBatchBookmark(batchNumber) // Check if synchronizer is up-to-date + //TODO: review if this is needed for !f.isSynced(ctx) { log.Info("wait for synchronizer to sync last batch") time.Sleep(time.Second) } - maxRemainingResources := getMaxRemainingResources(f.batchConstraints) - - return &Batch{ - batchNumber: newStateBatch.BatchNumber, - coinbase: newStateBatch.Coinbase, - initialStateRoot: newStateBatch.StateRoot, - imStateRoot: newStateBatch.StateRoot, - finalStateRoot: newStateBatch.StateRoot, - timestamp: newStateBatch.Timestamp, - imRemainingResources: maxRemainingResources, - finalRemainingResources: maxRemainingResources, - closingReason: state.EmptyClosingReason, - }, err + return nil } -// closeWIPBatch closes the current batch in the state -func (f *finalizer) closeWIPBatch(ctx context.Context) error { +// closeWIPBatch closes the current wip batch +func (f *finalizer) closeWIPBatch(ctx context.Context) { // Sanity 
check: batch must not be empty (should have L2 blocks) if f.wipBatch.isEmpty() { - f.Halt(ctx, fmt.Errorf("closing WIP batch %d without L2 blocks and should have at least 1", f.wipBatch.batchNumber), false) + f.Halt(ctx, fmt.Errorf("closing wip batch %d without L2 blocks and should have at least 1", f.wipBatch.batchNumber), false) + } + + log.Infof("wip batch %d closed, closing reason: %s", f.wipBatch.batchNumber, f.wipBatch.closingReason) + + f.wipBatch = nil +} + +// closeSIPBatch closes the current sip batch in the state +func (f *finalizer) closeSIPBatch(ctx context.Context, dbTx pgx.Tx) error { + // Sanity check: this can't happen + if f.sipBatch == nil { + f.Halt(ctx, fmt.Errorf("closing sip batch that is nil"), false) } - usedResources := getUsedBatchResources(f.batchConstraints, f.wipBatch.imRemainingResources) + // Sanity check: batch must not be empty (should have L2 blocks) + if f.sipBatch.isEmpty() { + f.Halt(ctx, fmt.Errorf("closing sip batch %d without L2 blocks and should have at least 1", f.sipBatch.batchNumber), false) + } + + usedResources := getUsedBatchResources(f.batchConstraints, f.sipBatch.imRemainingResources) receipt := state.ProcessingReceipt{ - BatchNumber: f.wipBatch.batchNumber, + BatchNumber: f.sipBatch.batchNumber, BatchResources: usedResources, - ClosingReason: f.wipBatch.closingReason, + ClosingReason: f.sipBatch.closingReason, } - dbTx, err := f.stateIntf.BeginStateTransaction(ctx) + err := f.stateIntf.CloseWIPBatch(ctx, receipt, dbTx) + if err != nil { return err } - err = f.stateIntf.CloseWIPBatch(ctx, receipt, dbTx) - if err != nil { - rollbackErr := dbTx.Rollback(ctx) - if rollbackErr != nil { - log.Errorf("error rolling back due to error when closing wip batch, rollback error: %v, error: %v", rollbackErr, err) - } - return err + // We store values needed for the batch sanity check in local variables, as we can execute the sanity check in a go func (parallel) and in this case f.sipBatch will be nil during some time + batchNumber := f.sipBatch.batchNumber + initialStateRoot := f.sipBatch.initialStateRoot + finalStateRoot := f.sipBatch.finalStateRoot + + // Reprocess full batch as sanity check + if f.cfg.SequentialBatchSanityCheck { + // Do the full batch reprocess now + _, _ = f.batchSanityCheck(ctx, batchNumber, initialStateRoot, finalStateRoot) } else { - err := dbTx.Commit(ctx) - if err != nil { - log.Errorf("error committing close wip batch, error: %v", err) - return err - } + // Do the full batch reprocess in parallel + go func() { + _, _ = f.batchSanityCheck(ctx, batchNumber, initialStateRoot, finalStateRoot) + }() } + log.Infof("sip batch %d closed in statedb, closing reason: %s", f.sipBatch.batchNumber, f.sipBatch.closingReason) + + f.sipBatch = nil + return nil } @@ -345,12 +418,16 @@ func (f *finalizer) batchSanityCheck(ctx context.Context, batchNum uint64, initi // Log batch detailed info log.Errorf("batch %d sanity check error: initialStateRoot: %s, expectedNewStateRoot: %s", batch.BatchNumber, initialStateRoot, expectedNewStateRoot) - for i, rawL2block := range rawL2Blocks.Blocks { - log.Infof("block[%d], txs: %d, deltaTimestamp: %d, l1InfoTreeIndex: %d", i, len(rawL2block.Transactions), rawL2block.DeltaTimestamp, rawL2block.IndexL1InfoTree) - for j, rawTx := range rawL2block.Transactions { - log.Infof("block[%d].tx[%d]: %s, egpPct: %d, data: %s", batch.BatchNumber, i, j, rawTx.Tx.Hash(), rawTx.EfficiencyPercentage, hex.EncodeToHex(rawTx.Data)) + batchLog := "" + totalTxs := 0 + for blockIdx, rawL2block := range rawL2Blocks.Blocks { + 
totalTxs += len(rawL2block.Transactions) + batchLog += fmt.Sprintf("block[%d], txs: %d, deltaTimestamp: %d, l1InfoTreeIndex: %d\n", blockIdx, len(rawL2block.Transactions), rawL2block.DeltaTimestamp, rawL2block.IndexL1InfoTree) + for txIdx, rawTx := range rawL2block.Transactions { + batchLog += fmt.Sprintf(" tx[%d]: %s, egpPct: %d\n", txIdx, rawTx.Tx.Hash(), rawTx.EfficiencyPercentage) } } + log.Infof("dump batch %d, blocks: %d, txs: %d\n%s", batch.BatchNumber, len(rawL2Blocks.Blocks), totalTxs, batchLog) f.Halt(ctx, fmt.Errorf("batch sanity check error. Check previous errors in logs to know which was the cause"), false) } @@ -381,10 +458,8 @@ func (f *finalizer) batchSanityCheck(ctx context.Context, batchNum uint64, initi return nil, ErrGetBatchByNumber } - var batchResponse *state.ProcessBatchResponse - startProcessing := time.Now() - batchResponse, err = f.stateIntf.ProcessBatchV2(ctx, batchRequest, false) + batchResponse, contextid, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, false) endProcessing := time.Now() if err != nil { @@ -426,9 +501,9 @@ func (f *finalizer) batchSanityCheck(ctx context.Context, batchNum uint64, initi return nil, ErrUpdateBatchAsChecked } - log.Infof("successful sanity check for batch %d, initialStateRoot: %s, stateRoot: %s, l2Blocks: %d, time: %v, used counters: %s", + log.Infof("successful sanity check for batch %d, initialStateRoot: %s, stateRoot: %s, l2Blocks: %d, time: %v, used counters: %s, contextId: %s", batch.BatchNumber, initialStateRoot, batchResponse.NewStateRoot.String(), len(batchResponse.BlockResponses), - endProcessing.Sub(startProcessing), f.logZKCounters(batchResponse.UsedZkCounters)) + endProcessing.Sub(startProcessing), f.logZKCounters(batchResponse.UsedZkCounters), contextid) return batchResponse, nil } @@ -506,8 +581,8 @@ func getUsedBatchResources(constraints state.BatchConstraintsCfg, remainingResou } } -// getMaxRemainingResources returns the max resources that can be used in a batch -func getMaxRemainingResources(constraints state.BatchConstraintsCfg) state.BatchResources { +// getMaxBatchResources returns the max resources that can be used in a batch +func getMaxBatchResources(constraints state.BatchConstraintsCfg) state.BatchResources { return state.BatchResources{ ZKCounters: state.ZKCounters{ GasUsed: constraints.MaxCumulativeGasUsed, @@ -524,6 +599,51 @@ func getMaxRemainingResources(constraints state.BatchConstraintsCfg) state.Batch } } +// getNeededZKCounters returns the needed counters to fit a tx in the wip batch. The needed counters are the counters used by the tx plus the high reserved counters. +// It will take into account the current high reserved counter got with previous txs but also checking reserved counters diff needed by this tx, since could be greater. 
+func getNeededZKCounters(highReservedCounters state.ZKCounters, usedCounters state.ZKCounters, reservedCounters state.ZKCounters) (state.ZKCounters, state.ZKCounters) { + neededCounter := func(counterName string, highCounter uint32, usedCounter uint32, reservedCounter uint32) (uint32, uint32) { + if reservedCounter < usedCounter { + log.Warnf("%s reserved counter %d is less than used counter %d, this shouldn't be possible", counterName, reservedCounter, usedCounter) + return usedCounter + highCounter, highCounter + } + diffReserved := reservedCounter - usedCounter + if diffReserved > highCounter { // reserved counter for this tx (difference) is greater that the high reserved counter got in previous txs + return usedCounter + diffReserved, diffReserved + } else { + return usedCounter + highCounter, highCounter + } + } + + needed := state.ZKCounters{} + newHigh := state.ZKCounters{} + + needed.Arithmetics, newHigh.Arithmetics = neededCounter("Arithmetics", highReservedCounters.Arithmetics, usedCounters.Arithmetics, reservedCounters.Arithmetics) + needed.Binaries, newHigh.Binaries = neededCounter("Binaries", highReservedCounters.Binaries, usedCounters.Binaries, reservedCounters.Binaries) + needed.KeccakHashes, newHigh.KeccakHashes = neededCounter("KeccakHashes", highReservedCounters.KeccakHashes, usedCounters.KeccakHashes, reservedCounters.KeccakHashes) + needed.MemAligns, newHigh.MemAligns = neededCounter("MemAligns", highReservedCounters.MemAligns, usedCounters.MemAligns, reservedCounters.MemAligns) + needed.PoseidonHashes, newHigh.PoseidonHashes = neededCounter("PoseidonHashes", highReservedCounters.PoseidonHashes, usedCounters.PoseidonHashes, reservedCounters.PoseidonHashes) + needed.PoseidonPaddings, newHigh.PoseidonPaddings = neededCounter("PoseidonPaddings", highReservedCounters.PoseidonPaddings, usedCounters.PoseidonPaddings, reservedCounters.PoseidonPaddings) + needed.Sha256Hashes_V2, newHigh.Sha256Hashes_V2 = neededCounter("Sha256Hashes_V2", highReservedCounters.Sha256Hashes_V2, usedCounters.Sha256Hashes_V2, reservedCounters.Sha256Hashes_V2) + needed.Steps, newHigh.Steps = neededCounter("Steps", highReservedCounters.Steps, usedCounters.Steps, reservedCounters.Steps) + + if reservedCounters.GasUsed < usedCounters.GasUsed { + log.Warnf("gasUsed reserved counter %d is less than used counter %d, this shouldn't be possible", reservedCounters.GasUsed, usedCounters.GasUsed) + needed.GasUsed = usedCounters.GasUsed + highReservedCounters.GasUsed + } else { + diffReserved := reservedCounters.GasUsed - usedCounters.GasUsed + if diffReserved > highReservedCounters.GasUsed { + needed.GasUsed = usedCounters.GasUsed + diffReserved + newHigh.GasUsed = diffReserved + } else { + needed.GasUsed = usedCounters.GasUsed + highReservedCounters.GasUsed + newHigh.GasUsed = highReservedCounters.GasUsed + } + } + + return needed, newHigh +} + // checkIfFinalizeBatch returns true if the batch must be closed due to a closing reason, also it returns the description of the close reason func (f *finalizer) checkIfFinalizeBatch() (bool, state.ClosingReason) { // Max txs per batch diff --git a/sequencer/config.go b/sequencer/config.go index 45210c4840..8b813c52db 100644 --- a/sequencer/config.go +++ b/sequencer/config.go @@ -70,7 +70,7 @@ type FinalizerCfg struct { // ForcedBatchesCheckInterval is used by the closing signals manager to wait for its operation ForcedBatchesCheckInterval types.Duration `mapstructure:"ForcedBatchesCheckInterval"` - // L1InfoTreeCheckInterval is the wait time to check if the L1InfoRoot 
has been updated + // L1InfoTreeCheckInterval is the time interval to check if the L1InfoRoot has been updated L1InfoTreeCheckInterval types.Duration `mapstructure:"L1InfoTreeCheckInterval"` // BatchMaxDeltaTimestamp is the resolution of the timestamp used to close a batch @@ -79,6 +79,10 @@ type FinalizerCfg struct { // L2BlockMaxDeltaTimestamp is the resolution of the timestamp used to close a L2 block L2BlockMaxDeltaTimestamp types.Duration `mapstructure:"L2BlockMaxDeltaTimestamp"` + // StateRootSyncInterval indicates how often the stateroot generated by the L2 block process will be synchronized with + // the stateroot used in the tx-by-tx execution + StateRootSyncInterval types.Duration `mapstructure:"StateRootSyncInterval"` + // HaltOnBatchNumber specifies the batch number where the Sequencer will stop to process more transactions and generate new batches. // The Sequencer will halt after it closes the batch equal to this number HaltOnBatchNumber uint64 `mapstructure:"HaltOnBatchNumber"` diff --git a/sequencer/datastreamer.go b/sequencer/datastreamer.go index 700b8b3e02..7f5e7e763a 100644 --- a/sequencer/datastreamer.go +++ b/sequencer/datastreamer.go @@ -1,6 +1,7 @@ package sequencer import ( + "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" ) @@ -42,6 +43,7 @@ func (f *finalizer) DSSendL2Block(batchNumber uint64, blockResponse *state.Proce l2Transactions = append(l2Transactions, l2Transaction) } + log.Infof("[ds-debug] sending l2block %d to datastream channel", blockResponse.BlockNumber) f.dataToStream <- state.DSL2FullBlock{ DSL2Block: l2Block, Txs: l2Transactions, diff --git a/sequencer/finalizer.go b/sequencer/finalizer.go index ff565f79da..6d5b74e50d 100644 --- a/sequencer/finalizer.go +++ b/sequencer/finalizer.go @@ -41,9 +41,13 @@ type finalizer struct { stateIntf stateInterface etherman ethermanInterface wipBatch *Batch + pipBatch *Batch // processing-in-progress batch is the batch that is being processing (L2 block process) + sipBatch *Batch // storing-in-progress batch is the batch that is being stored/updated in the state db wipL2Block *L2Block batchConstraints state.BatchConstraintsCfg haltFinalizer atomic.Bool + // stateroot sync + nextStateRootSync time.Time // forced batches nextForcedBatches []state.ForcedBatch nextForcedBatchDeadline int64 @@ -60,10 +64,12 @@ type finalizer struct { effectiveGasPrice *pool.EffectiveGasPrice // pending L2 blocks to process (executor) pendingL2BlocksToProcess chan *L2Block - pendingL2BlocksToProcessWG *sync.WaitGroup + pendingL2BlocksToProcessWG *WaitGroupCount + l2BlockReorg atomic.Bool + lastL2BlockWasReorg bool // pending L2 blocks to store in the state pendingL2BlocksToStore chan *L2Block - pendingL2BlocksToStoreWG *sync.WaitGroup + pendingL2BlocksToStoreWG *WaitGroupCount // L2 block counter for tracking purposes l2BlockCounter uint64 // executor flushid control @@ -106,6 +112,8 @@ func newFinalizer( stateIntf: stateIntf, etherman: etherman, batchConstraints: batchConstraints, + // stateroot sync + nextStateRootSync: time.Now().Add(cfg.StateRootSyncInterval.Duration), // forced batches nextForcedBatches: make([]state.ForcedBatch, 0), nextForcedBatchDeadline: 0, @@ -120,10 +128,10 @@ func newFinalizer( effectiveGasPrice: pool.NewEffectiveGasPrice(poolCfg.EffectiveGasPrice), // pending L2 blocks to process (executor) pendingL2BlocksToProcess: make(chan *L2Block, pendingL2BlocksBufferSize), - pendingL2BlocksToProcessWG: new(sync.WaitGroup), + pendingL2BlocksToProcessWG: new(WaitGroupCount), 
// pending L2 blocks to store in the state pendingL2BlocksToStore: make(chan *L2Block, pendingL2BlocksBufferSize), - pendingL2BlocksToStoreWG: new(sync.WaitGroup), + pendingL2BlocksToStoreWG: new(WaitGroupCount), storedFlushID: 0, // executor flushid control proverID: "", @@ -139,6 +147,7 @@ func newFinalizer( dataToStream: dataToStream, } + f.l2BlockReorg.Store(false) f.haltFinalizer.Store(false) return &f @@ -375,12 +384,19 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { log.Debug("finalizer init loop") showNotFoundTxLog := true // used to log debug only the first message when there is no txs to process for { + if f.l2BlockReorg.Load() { + err := f.processL2BlockReorg(ctx) + if err != nil { + log.Errorf("error processing L2 block reorg, error: %v", err) + } + } + // We have reached the L2 block time, we need to close the current L2 block and open a new one - if f.wipL2Block.timestamp+uint64(f.cfg.L2BlockMaxDeltaTimestamp.Seconds()) <= uint64(time.Now().Unix()) { + if f.wipL2Block.createdAt.Add(f.cfg.L2BlockMaxDeltaTimestamp.Duration).Before(time.Now()) { f.finalizeWIPL2Block(ctx) } - tx, err := f.workerIntf.GetBestFittingTx(f.wipBatch.imRemainingResources) + tx, err := f.workerIntf.GetBestFittingTx(f.wipBatch.imRemainingResources, f.wipBatch.imHighReservedZKCounters) // If we have txs pending to process but none of them fits into the wip batch, we close the wip batch and open a new one if err == ErrNoFittingTransaction { @@ -503,7 +519,7 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first loss := new(big.Int).Sub(tx.EffectiveGasPrice, txGasPrice) // If loss > 0 the warning message indicating we loss fee for thix tx if loss.Cmp(new(big.Int).SetUint64(0)) == 1 { - log.Warnf("egp-loss: gasPrice: %d, effectiveGasPrice1: %d, loss: %d, tx: %s", txGasPrice, tx.EffectiveGasPrice, loss, tx.HashStr) + log.Infof("egp-loss: gasPrice: %d, effectiveGasPrice1: %d, loss: %d, tx: %s", txGasPrice, tx.EffectiveGasPrice, loss, tx.HashStr) } tx.EffectiveGasPrice.Set(txGasPrice) @@ -541,7 +557,7 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first batchRequest.Transactions = append(batchRequest.Transactions, effectivePercentageAsDecodedHex...) 
 	executionStart := time.Now()
-	batchResponse, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, false)
+	batchResponse, contextId, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, false)
 	executionTime := time.Since(executionStart)
 	f.wipL2Block.metrics.transactionsTimes.executor += executionTime
@@ -568,24 +584,27 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first
 	oldStateRoot := f.wipBatch.imStateRoot
 	if len(batchResponse.BlockResponses) > 0 {
-		errWg, err = f.handleProcessTransactionResponse(ctx, tx, batchResponse, oldStateRoot)
+		var neededZKCounters state.ZKCounters
+		errWg, err, neededZKCounters = f.handleProcessTransactionResponse(ctx, tx, batchResponse, oldStateRoot)
 		if err != nil {
 			return errWg, err
 		}
-	}
-	// Update imStateRoot
-	f.wipBatch.imStateRoot = batchResponse.NewStateRoot
+		// Update imStateRoot
+		f.wipBatch.imStateRoot = batchResponse.NewStateRoot
-	log.Infof("processed tx %s, batchNumber: %d, l2Block: [%d], newStateRoot: %s, oldStateRoot: %s, time: {process: %v, executor: %v}, used counters: %s, reserved counters: %s",
-		tx.HashStr, batchRequest.BatchNumber, f.wipL2Block.trackingNum, batchResponse.NewStateRoot.String(), batchRequest.OldStateRoot.String(),
-		time.Since(start), executionTime, f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters))
+		log.Infof("processed tx %s, batchNumber: %d, l2Block: [%d], newStateRoot: %s, oldStateRoot: %s, time: {process: %v, executor: %v}, counters: {used: %s, reserved: %s, needed: %s}, contextId: %s",
+			tx.HashStr, batchRequest.BatchNumber, f.wipL2Block.trackingNum, batchResponse.NewStateRoot.String(), batchRequest.OldStateRoot.String(),
+			time.Since(start), executionTime, f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters), f.logZKCounters(neededZKCounters), contextId)
-	return nil, nil
+		return nil, nil
+	} else {
+		return nil, fmt.Errorf("error executing batch %d, batchResponse has returned 0 blockResponses and should return 1", f.wipBatch.batchNumber)
+	}
 }
 // handleProcessTransactionResponse handles the response of transaction processing.
-func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *TxTracker, result *state.ProcessBatchResponse, oldStateRoot common.Hash) (errWg *sync.WaitGroup, err error) { +func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *TxTracker, result *state.ProcessBatchResponse, oldStateRoot common.Hash) (errWg *sync.WaitGroup, err error, neededZKCounters state.ZKCounters) { txResponse := result.BlockResponses[0].TransactionResponses[0] // Update metrics @@ -596,7 +615,7 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx if !state.IsStateRootChanged(errorCode) { // If intrinsic error or OOC error, we skip adding the transaction to the batch errWg = f.handleProcessTransactionError(ctx, result, tx) - return errWg, txResponse.RomError + return errWg, txResponse.RomError, state.ZKCounters{} } egpEnabled := f.effectiveGasPrice.IsEnabled() @@ -611,7 +630,7 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx if err != nil { if egpEnabled { log.Errorf("failed to calculate effective gas price with new gasUsed for tx %s, error: %v", tx.HashStr, err.Error()) - return nil, err + return nil, err, state.ZKCounters{} } else { log.Warnf("effectiveGasPrice is disabled, but failed to calculate effective gas price with new gasUsed for tx %s, error: %v", tx.HashStr, err.Error()) tx.EGPLog.Error = fmt.Sprintf("%s; CalculateEffectiveGasPrice#2: %s", tx.EGPLog.Error, err) @@ -636,28 +655,33 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx } if errCompare != nil && egpEnabled { - return nil, errCompare + return nil, errCompare, state.ZKCounters{} } } } - // Check if reserved resources of the tx fits in the remaining batch resources + // Check if needed resources of the tx fits in the remaining batch resources + // Needed resources are the used resources plus the max difference between used and reserved of all the txs (including this) in the batch + neededZKCounters, newHighZKCounters := getNeededZKCounters(f.wipBatch.imHighReservedZKCounters, result.UsedZkCounters, result.ReservedZkCounters) subOverflow := false - fits, overflowResource := f.wipBatch.imRemainingResources.Fits(state.BatchResources{ZKCounters: result.ReservedZkCounters, Bytes: uint64(len(tx.RawTx))}) + fits, overflowResource := f.wipBatch.imRemainingResources.Fits(state.BatchResources{ZKCounters: neededZKCounters, Bytes: uint64(len(tx.RawTx))}) if fits { // Subtract the used resources from the batch subOverflow, overflowResource = f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: result.UsedZkCounters, Bytes: uint64(len(tx.RawTx))}) - if subOverflow { // Sanity check, this cannot happen as reservedZKCounters should be >= that usedZKCounters - sLog := fmt.Sprintf("tx %s used resources exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing. Batch counters: %s, tx used counters: %s", - tx.HashStr, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(result.UsedZkCounters)) + if subOverflow { // Sanity check, this cannot happen as neededZKCounters should be >= that usedZKCounters + sLog := fmt.Sprintf("tx %s used resources exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing. 
counters: {batch: %s, used: %s, reserved: %s, needed: %s, high: %s}", + tx.HashStr, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(result.UsedZkCounters), f.logZKCounters(result.ReservedZkCounters), f.logZKCounters(neededZKCounters), f.logZKCounters(f.wipBatch.imHighReservedZKCounters)) log.Errorf(sLog) f.LogEvent(ctx, event.Level_Error, event.EventID_UsedZKCountersOverflow, sLog, nil) } + + // Update highReservedZKCounters + f.wipBatch.imHighReservedZKCounters = newHighZKCounters } else { - log.Infof("current tx %s reserved resources exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing. Batch counters: %s, tx reserved counters: %s", - tx.HashStr, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(result.ReservedZkCounters)) + log.Infof("current tx %s needed resources exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing. counters: {batch: %s, used: %s, reserved: %s, needed: %s, high: %s}", + tx.HashStr, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(result.UsedZkCounters), f.logZKCounters(result.ReservedZkCounters), f.logZKCounters(neededZKCounters), f.logZKCounters(f.wipBatch.imHighReservedZKCounters)) if !f.batchConstraints.IsWithinConstraints(result.ReservedZkCounters) { log.Infof("current tx %s reserved resources exceeds the max limit for batch resources (node OOC), setting tx as invalid in the pool", tx.HashStr) @@ -673,15 +697,15 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx log.Errorf("failed to update status to invalid in the pool for tx %s, error: %v", tx.Hash.String(), err) } - return nil, ErrBatchResourceOverFlow + return nil, ErrBatchResourceOverFlow, state.ZKCounters{} } } - // If reserved tx resources don't fit in the remaining batch resources (or we got an overflow when trying to subtract the used resources) + // If needed tx resources don't fit in the remaining batch resources (or we got an overflow when trying to subtract the used resources) // we update the ZKCounters of the tx and returns ErrBatchResourceOverFlow error if !fits || subOverflow { f.workerIntf.UpdateTxZKCounters(txResponse.TxHash, tx.From, result.UsedZkCounters, result.ReservedZkCounters) - return nil, ErrBatchResourceOverFlow + return nil, ErrBatchResourceOverFlow, state.ZKCounters{} } // Save Enabled, GasPriceOC, BalanceOC and final effective gas price for later logging @@ -704,7 +728,7 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx // Update metrics f.wipL2Block.metrics.gas += txResponse.GasUsed - return nil, nil + return nil, nil, neededZKCounters } // compareTxEffectiveGasPrice compares newEffectiveGasPrice with tx.EffectiveGasPrice. 
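For reference, the needed-counters rule used above (and in getNeededZKCounters) boils down to: a tx must fit its used counters plus the largest used-to-reserved gap seen so far in the batch. Below is a minimal, self-contained sketch of that rule for a single counter, in plain Go with hypothetical values; it does not use the node's state.ZKCounters types.

package main

import "fmt"

// neededCounter mirrors the per-counter rule in getNeededZKCounters:
// needed = used + max(highSoFar, reserved-used); the second return value is
// the new high-reserved delta the batch carries forward.
func neededCounter(highSoFar, used, reserved uint64) (uint64, uint64) {
	if reserved < used {
		// shouldn't happen (reserved >= used), fall back to the running high
		return used + highSoFar, highSoFar
	}
	diff := reserved - used
	if diff > highSoFar {
		return used + diff, diff
	}
	return used + highSoFar, highSoFar
}

func main() {
	// tx1: used 250 Steps, reserved 400 -> its delta of 150 becomes the batch high
	needed, high := neededCounter(0, 250, 400)
	fmt.Println(needed, high) // 400 150

	// tx2: used 300, reserved 320 -> its delta of 20 is below the running high of 150,
	// so the batch must still keep room for used+150
	needed, high = neededCounter(high, 300, 320)
	fmt.Println(needed, high) // 450 150
}
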
@@ -752,14 +776,14 @@ func (f *finalizer) compareTxEffectiveGasPrice(ctx context.Context, tx *TxTracke } func (f *finalizer) updateWorkerAfterSuccessfulProcessing(ctx context.Context, txHash common.Hash, txFrom common.Address, isForced bool, result *state.ProcessBatchResponse) { - // Delete the transaction from the worker + // Delete the transaction from the worker pool if isForced { f.workerIntf.DeleteForcedTx(txHash, txFrom) - log.Debugf("forced tx %s deleted from address %s", txHash.String(), txFrom.Hex()) + log.Debugf("forced tx %s deleted from worker, address: %s", txHash.String(), txFrom.Hex()) return } else { - f.workerIntf.DeleteTx(txHash, txFrom) - log.Debugf("tx %s deleted from address %s", txHash.String(), txFrom.Hex()) + f.workerIntf.MoveTxPendingToStore(txHash, txFrom) + log.Debugf("tx %s moved to pending to store in worker, address: %s", txHash.String(), txFrom.Hex()) } txsToDelete := f.workerIntf.UpdateAfterSingleSuccessfulTxExecution(txFrom, result.ReadWriteAddresses) @@ -818,7 +842,7 @@ func (f *finalizer) handleProcessTransactionError(ctx context.Context, result *s } else { // Delete the transaction from the txSorted list f.workerIntf.DeleteTx(tx.Hash, tx.From) - log.Debugf("tx %s deleted from txSorted list", tx.HashStr) + log.Debugf("tx %s deleted from worker pool, address: %s", tx.HashStr, tx.From) wg.Add(1) go func() { @@ -858,7 +882,7 @@ func (f *finalizer) logZKCounters(counters state.ZKCounters) string { func (f *finalizer) Halt(ctx context.Context, err error, isFatal bool) { f.haltFinalizer.Store(true) - f.LogEvent(ctx, event.Level_Critical, event.EventID_FinalizerHalt, fmt.Sprintf("finalizer halted due to error, error: %s", err), nil) + f.LogEvent(ctx, event.Level_Critical, event.EventID_FinalizerHalt, fmt.Sprintf("finalizer halted due to error: %s", err), nil) if isFatal { log.Fatalf("fatal error on finalizer, error: %v", err) diff --git a/sequencer/finalizer_test.go b/sequencer/finalizer_test.go index 8e7b5fa9d9..6e54c342f8 100644 --- a/sequencer/finalizer_test.go +++ b/sequencer/finalizer_test.go @@ -941,21 +941,8 @@ func TestNewFinalizer(t *testing.T) { } }*/ -// TestFinalizer_closeBatch tests the closeBatch method. -func TestFinalizer_closeWIPBatch(t *testing.T) { - // arrange - f = setupFinalizer(true) - // set wip batch has at least one L2 block as it can not be closed empty - f.wipBatch.countOfL2Blocks++ - - usedResources := getUsedBatchResources(f.batchConstraints, f.wipBatch.imRemainingResources) - - receipt := state.ProcessingReceipt{ - BatchNumber: f.wipBatch.batchNumber, - BatchResources: usedResources, - ClosingReason: f.wipBatch.closingReason, - } - +// TestFinalizer_finalizeSIPBatch tests the finalizeSIPBatch method. 
+func TestFinalizer_finalizeSIPBatch(t *testing.T) { managerErr := fmt.Errorf("some err") testCases := []struct { @@ -979,22 +966,39 @@ func TestFinalizer_closeWIPBatch(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // arrange - stateMock.Mock.On("CloseWIPBatch", ctx, receipt, mock.Anything).Return(tc.managerErr).Once() + f = setupFinalizer(true) + // set wip batch has at least one L2 block as it can not be closed empty + f.sipBatch.countOfL2Blocks++ + + usedResources := getUsedBatchResources(f.batchConstraints, f.wipBatch.imRemainingResources) + + receipt := state.ProcessingReceipt{ + BatchNumber: f.wipBatch.batchNumber, + BatchResources: usedResources, + ClosingReason: f.wipBatch.closingReason, + } + + // arrange stateMock.On("BeginStateTransaction", ctx).Return(dbTxMock, nilErr).Once() + stateMock.On("CloseWIPBatch", ctx, receipt, mock.Anything).Return(tc.managerErr).Once() + if tc.managerErr == nil { + stateMock.On("GetBatchByNumber", ctx, f.sipBatch.batchNumber, nil).Return(&state.Batch{BatchNumber: f.sipBatch.batchNumber}, nilErr).Once() + stateMock.On("GetForkIDByBatchNumber", f.wipBatch.batchNumber).Return(uint64(9)).Once() + stateMock.On("GetL1InfoTreeDataFromBatchL2Data", ctx, mock.Anything, nil).Return(map[uint32]state.L1DataV2{}, state.ZeroHash, state.ZeroHash, nil) + stateMock.On("ProcessBatchV2", ctx, mock.Anything, false).Return(&state.ProcessBatchResponse{}, "", nil) + stateMock.On("UpdateBatchAsChecked", ctx, f.sipBatch.batchNumber, nil).Return(nil) dbTxMock.On("Commit", ctx).Return(nilErr).Once() } else { dbTxMock.On("Rollback", ctx).Return(nilErr).Once() } // act - err := f.closeWIPBatch(ctx) + err := f.finalizeSIPBatch(ctx) // assert if tc.expectedErr != nil { - assert.Error(t, err) - assert.EqualError(t, err, tc.expectedErr.Error()) - assert.ErrorIs(t, err, tc.managerErr) + assert.ErrorContains(t, err, tc.expectedErr.Error()) } else { assert.NoError(t, err) } @@ -1745,7 +1749,7 @@ func TestFinalizer_updateWorkerAfterSuccessfulProcessing(t *testing.T) { t.Run(tc.name, func(t *testing.T) { // arrange finalizerInstance := setupFinalizer(false) - workerMock.On("DeleteTx", tc.txTracker.Hash, tc.txTracker.From).Times(tc.expectedDeleteTxCount) + workerMock.On("MoveTxPendingToStore", tc.txTracker.Hash, tc.txTracker.From).Times(tc.expectedDeleteTxCount) txsToDelete := make([]*TxTracker, 0, len(tc.processBatchResponse.ReadWriteAddresses)) for _, infoReadWrite := range tc.processBatchResponse.ReadWriteAddresses { txsToDelete = append(txsToDelete, &TxTracker{ @@ -2037,7 +2041,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { t.Run(tc.name, func(t *testing.T) { // arrange f = setupFinalizer(true) - maxRemainingResource := getMaxRemainingResources(bc) + maxRemainingResource := getMaxBatchResources(bc) f.wipBatch.imRemainingResources = tc.modifyResourceFunc(maxRemainingResource) // act @@ -2098,7 +2102,7 @@ func TestFinalizer_getConstraintThresholdUint32(t *testing.T) { func TestFinalizer_getRemainingResources(t *testing.T) { // act - remainingResources := getMaxRemainingResources(bc) + remainingResources := getMaxBatchResources(bc) // assert assert.Equal(t, remainingResources.ZKCounters.GasUsed, bc.MaxCumulativeGasUsed) @@ -2196,7 +2200,7 @@ func setupFinalizer(withWipBatch bool) *finalizer { initialStateRoot: oldHash, imStateRoot: newHash, timestamp: now(), - imRemainingResources: getMaxRemainingResources(bc), + imRemainingResources: getMaxBatchResources(bc), closingReason: state.EmptyClosingReason, } } @@ -2213,6 +2217,7 @@ func 
setupFinalizer(withWipBatch bool) *finalizer { poolIntf: poolMock, stateIntf: stateMock, wipBatch: wipBatch, + sipBatch: wipBatch, batchConstraints: bc, nextForcedBatches: make([]state.ForcedBatch, 0), nextForcedBatchDeadline: 0, @@ -2220,9 +2225,9 @@ func setupFinalizer(withWipBatch bool) *finalizer { effectiveGasPrice: pool.NewEffectiveGasPrice(poolCfg.EffectiveGasPrice), eventLog: eventLog, pendingL2BlocksToProcess: make(chan *L2Block, pendingL2BlocksBufferSize), - pendingL2BlocksToProcessWG: new(sync.WaitGroup), + pendingL2BlocksToProcessWG: new(WaitGroupCount), pendingL2BlocksToStore: make(chan *L2Block, pendingL2BlocksBufferSize), - pendingL2BlocksToStoreWG: new(sync.WaitGroup), + pendingL2BlocksToStoreWG: new(WaitGroupCount), storedFlushID: 0, storedFlushIDCond: sync.NewCond(new(sync.Mutex)), proverID: "", diff --git a/sequencer/forcedbatch.go b/sequencer/forcedbatch.go index ebe078c1b8..85f74abee1 100644 --- a/sequencer/forcedbatch.go +++ b/sequencer/forcedbatch.go @@ -40,15 +40,16 @@ func (f *finalizer) processForcedBatches(ctx context.Context, lastBatchNumber ui forcedBatchToProcess = *missingForcedBatch } + var contextId string log.Infof("processing forced batch %d, lastBatchNumber: %d, stateRoot: %s", forcedBatchToProcess.ForcedBatchNumber, lastBatchNumber, stateRoot.String()) - lastBatchNumber, stateRoot, err = f.processForcedBatch(ctx, forcedBatchToProcess, lastBatchNumber, stateRoot) + lastBatchNumber, stateRoot, contextId, err = f.processForcedBatch(ctx, forcedBatchToProcess, lastBatchNumber, stateRoot) if err != nil { log.Errorf("error when processing forced batch %d, error: %v", forcedBatchToProcess.ForcedBatchNumber, err) return lastBatchNumber, stateRoot } - log.Infof("processed forced batch %d, batchNumber: %d, newStateRoot: %s", forcedBatchToProcess.ForcedBatchNumber, lastBatchNumber, stateRoot.String()) + log.Infof("processed forced batch %d, batchNumber: %d, newStateRoot: %s, contextId: %s", forcedBatchToProcess.ForcedBatchNumber, lastBatchNumber, stateRoot.String(), contextId) nextForcedBatchNumber += 1 } @@ -57,26 +58,26 @@ func (f *finalizer) processForcedBatches(ctx context.Context, lastBatchNumber ui return lastBatchNumber, stateRoot } -func (f *finalizer) processForcedBatch(ctx context.Context, forcedBatch state.ForcedBatch, lastBatchNumber uint64, stateRoot common.Hash) (newLastBatchNumber uint64, newStateRoot common.Hash, retErr error) { +func (f *finalizer) processForcedBatch(ctx context.Context, forcedBatch state.ForcedBatch, lastBatchNumber uint64, stateRoot common.Hash) (newLastBatchNumber uint64, newStateRoot common.Hash, ctxId string, retErr error) { dbTx, err := f.stateIntf.BeginStateTransaction(ctx) if err != nil { log.Errorf("failed to begin state transaction for process forced batch %d, error: %v", forcedBatch.ForcedBatchNumber, err) - return lastBatchNumber, stateRoot, err + return lastBatchNumber, stateRoot, "", err } // Helper function in case we get an error when processing the forced batch - rollbackOnError := func(retError error) (newLastBatchNumber uint64, newStateRoot common.Hash, retErr error) { + rollbackOnError := func(retError error) (newLastBatchNumber uint64, newStateRoot common.Hash, ctxId string, retErr error) { err := dbTx.Rollback(ctx) if err != nil { - return lastBatchNumber, stateRoot, fmt.Errorf("rollback error due to error %v, error: %v", retError, err) + return lastBatchNumber, stateRoot, "", fmt.Errorf("rollback error due to error %v, error: %v", retError, err) } - return lastBatchNumber, stateRoot, retError + return 
lastBatchNumber, stateRoot, "", retError } // Get L1 block for the forced batch fbL1Block, err := f.stateIntf.GetBlockByNumber(ctx, forcedBatch.BlockNumber, dbTx) if err != nil { - return lastBatchNumber, stateRoot, fmt.Errorf("error getting L1 block number %d for forced batch %d, error: %v", forcedBatch.ForcedBatchNumber, forcedBatch.ForcedBatchNumber, err) + return lastBatchNumber, stateRoot, "", fmt.Errorf("error getting L1 block number %d for forced batch %d, error: %v", forcedBatch.ForcedBatchNumber, forcedBatch.ForcedBatchNumber, err) } newBatchNumber := lastBatchNumber + 1 @@ -107,7 +108,7 @@ func (f *finalizer) processForcedBatch(ctx context.Context, forcedBatch state.Fo Caller: stateMetrics.DiscardCallerLabel, } - batchResponse, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, true) + batchResponse, contextId, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, true) if err != nil { return rollbackOnError(fmt.Errorf("failed to process/execute forced batch %d, error: %v", forcedBatch.ForcedBatchNumber, err)) } @@ -141,7 +142,7 @@ func (f *finalizer) processForcedBatch(ctx context.Context, forcedBatch state.Fo return rollbackOnError(fmt.Errorf("error when commit dbTx when processing forced batch %d, error: %v", forcedBatch.ForcedBatchNumber, err)) } - return newBatchNumber, batchResponse.NewStateRoot, nil + return newBatchNumber, batchResponse.NewStateRoot, contextId, nil } // addForcedTxToWorker adds the txs of the forced batch to the worker diff --git a/sequencer/interfaces.go b/sequencer/interfaces.go index 10c58980ac..c92f502e10 100644 --- a/sequencer/interfaces.go +++ b/sequencer/interfaces.go @@ -50,7 +50,7 @@ type stateInterface interface { GetBalanceByStateRoot(ctx context.Context, address common.Address, root common.Hash) (*big.Int, error) GetNonceByStateRoot(ctx context.Context, address common.Address, root common.Hash) (*big.Int, error) GetLastStateRoot(ctx context.Context, dbTx pgx.Tx) (common.Hash, error) - ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) + ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, string, error) CloseBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error CloseWIPBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error GetForcedBatch(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (*state.ForcedBatch, error) @@ -84,15 +84,16 @@ type stateInterface interface { } type workerInterface interface { - GetBestFittingTx(resources state.BatchResources) (*TxTracker, error) + GetBestFittingTx(remainingResources state.BatchResources, highReservedCounters state.ZKCounters) (*TxTracker, error) UpdateAfterSingleSuccessfulTxExecution(from common.Address, touchedAddresses map[common.Address]*state.InfoReadWrite) []*TxTracker UpdateTxZKCounters(txHash common.Hash, from common.Address, usedZKCounters state.ZKCounters, reservedZKCounters state.ZKCounters) AddTxTracker(ctx context.Context, txTracker *TxTracker) (replacedTx *TxTracker, dropReason error) MoveTxToNotReady(txHash common.Hash, from common.Address, actualNonce *uint64, actualBalance *big.Int) []*TxTracker DeleteTx(txHash common.Hash, from common.Address) - AddPendingTxToStore(txHash common.Hash, addr common.Address) - DeletePendingTxToStore(txHash common.Hash, addr common.Address) + MoveTxPendingToStore(txHash common.Hash, addr common.Address) + DeleteTxPendingToStore(txHash common.Hash, addr 
common.Address) NewTxTracker(tx types.Transaction, usedZKcounters state.ZKCounters, reservedZKCouners state.ZKCounters, ip string) (*TxTracker, error) AddForcedTx(txHash common.Hash, addr common.Address) DeleteForcedTx(txHash common.Hash, addr common.Address) + RestoreTxsPendingToStore(ctx context.Context) ([]*TxTracker, []*TxTracker) } diff --git a/sequencer/l2block.go b/sequencer/l2block.go index c49c3fc825..3c88245106 100644 --- a/sequencer/l2block.go +++ b/sequencer/l2block.go @@ -24,9 +24,11 @@ type L2Block struct { l1InfoTreeExitRoot state.L1InfoTreeExitRootStorageEntry l1InfoTreeExitRootChanged bool bytes uint64 - usedZKCounters state.ZKCounters - reservedZKCounters state.ZKCounters + usedZKCountersOnNew state.ZKCounters + reservedZKCountersOnNew state.ZKCounters + highReservedZKCounters state.ZKCounters transactions []*TxTracker + batch *Batch batchResponse *state.ProcessBatchResponse metrics metrics } @@ -85,10 +87,6 @@ func (f *finalizer) addPendingL2BlockToProcess(ctx context.Context, l2Block *L2B func (f *finalizer) addPendingL2BlockToStore(ctx context.Context, l2Block *L2Block) { f.pendingL2BlocksToStoreWG.Add(1) - for _, tx := range l2Block.transactions { - f.workerIntf.AddPendingTxToStore(tx.Hash, tx.From) - } - select { case f.pendingL2BlocksToStore <- l2Block: case <-ctx.Done(): @@ -96,13 +94,15 @@ func (f *finalizer) addPendingL2BlockToStore(ctx context.Context, l2Block *L2Blo // delete the pending TxToStore added in the worker f.pendingL2BlocksToStoreWG.Done() for _, tx := range l2Block.transactions { - f.workerIntf.DeletePendingTxToStore(tx.Hash, tx.From) + f.workerIntf.DeleteTxPendingToStore(tx.Hash, tx.From) } } } // processPendingL2Blocks processes (executor) the pending to process L2 blocks func (f *finalizer) processPendingL2Blocks(ctx context.Context) { + //rand.Seed(time.Now().UnixNano()) + for { select { case l2Block, ok := <-f.pendingL2BlocksToProcess: @@ -111,12 +111,36 @@ func (f *finalizer) processPendingL2Blocks(ctx context.Context) { return } + // if l2BlockReorg we need to "flush" the channel to discard pending L2Blocks + if f.l2BlockReorg.Load() { + f.pendingL2BlocksToProcessWG.Done() + continue + } + err := f.processL2Block(ctx, l2Block) if err != nil { + halt := false + if f.lastL2BlockWasReorg { + // We had 2 consecutives reorg in the same L2 block, we halt after log/dump the info + halt = true + } else { + f.l2BlockReorg.Store(true) + f.lastL2BlockWasReorg = true + } + + warnmsg := fmt.Sprintf("sequencer L2 block [%d] reorg detected, batch: %d, processing it...", l2Block.trackingNum, l2Block.batch.batchNumber) + log.Warnf(warnmsg) + f.LogEvent(ctx, event.Level_Critical, event.EventID_L2BlockReorg, warnmsg, nil) + // Dump L2Block info f.dumpL2Block(l2Block) - f.Halt(ctx, fmt.Errorf("error processing L2 block [%d], error: %v", l2Block.trackingNum, err), false) + + if halt { + f.Halt(ctx, fmt.Errorf("consecutives L2 block reorgs in the same L2 block [%d]", l2Block.trackingNum), false) + } + } else { + f.lastL2BlockWasReorg = false } f.pendingL2BlocksToProcessWG.Done() @@ -164,13 +188,23 @@ func (f *finalizer) storePendingL2Blocks(ctx context.Context) { func (f *finalizer) processL2Block(ctx context.Context, l2Block *L2Block) error { processStart := time.Now() - initialStateRoot := f.wipBatch.finalStateRoot + if f.pipBatch == nil { + f.pipBatch = l2Block.batch + } else if f.pipBatch.batchNumber != l2Block.batch.batchNumber { + // We have received the first L2 block of the next batch to process + // We need to "propagate" finalStateRoot to the new batch 
as initalStateRoot/finalStateRoot and set it as the current pipBatch + l2Block.batch.initialStateRoot = f.pipBatch.finalStateRoot + l2Block.batch.finalStateRoot = f.pipBatch.finalStateRoot + f.pipBatch = l2Block.batch + } + + initialStateRoot := f.pipBatch.finalStateRoot log.Infof("processing L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, initialStateRoot: %s txs: %d", - l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, + l2Block.trackingNum, l2Block.batch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.l1InfoTreeExitRootChanged, initialStateRoot, len(l2Block.transactions)) - batchResponse, batchL2DataSize, err := f.executeL2Block(ctx, initialStateRoot, l2Block) + batchResponse, batchL2DataSize, contextId, err := f.executeL2Block(ctx, initialStateRoot, l2Block) if err != nil { return fmt.Errorf("failed to execute L2 block [%d], error: %v", l2Block.trackingNum, err) @@ -199,39 +233,55 @@ func (f *finalizer) processL2Block(ctx context.Context, l2Block *L2Block) error l2Block.batchResponse = batchResponse + // Check if needed resources of the L2 block fits in the remaining batch resources + // Needed resources are the used resources plus the max difference between used and reserved of all the L2 blocks (including this) in the batch + neededZKCounters, newHighZKCounters := getNeededZKCounters(l2Block.batch.finalHighReservedZKCounters, batchResponse.UsedZkCounters, batchResponse.ReservedZkCounters) + // Update finalRemainingResources of the batch - fits, overflowResource := f.wipBatch.finalRemainingResources.Fits(state.BatchResources{ZKCounters: batchResponse.ReservedZkCounters, Bytes: batchL2DataSize}) + fits, overflowResource := l2Block.batch.finalRemainingResources.Fits(state.BatchResources{ZKCounters: neededZKCounters, Bytes: batchL2DataSize}) if fits { - subOverflow, overflowResource := f.wipBatch.finalRemainingResources.Sub(state.BatchResources{ZKCounters: batchResponse.UsedZkCounters, Bytes: batchL2DataSize}) + subOverflow, overflowResource := l2Block.batch.finalRemainingResources.Sub(state.BatchResources{ZKCounters: batchResponse.UsedZkCounters, Bytes: batchL2DataSize}) if subOverflow { // Sanity check, this cannot happen as reservedZKCounters should be >= that usedZKCounters - return fmt.Errorf("error subtracting L2 block %d [%d] used resources from the batch %d, overflow resource: %s, batch counters: %s, L2 block used counters: %s, batch bytes: %d, L2 block bytes: %d", - blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, overflowResource, f.logZKCounters(f.wipBatch.finalRemainingResources.ZKCounters), f.logZKCounters(batchResponse.UsedZkCounters), f.wipBatch.finalRemainingResources.Bytes, batchL2DataSize) + return fmt.Errorf("error subtracting L2 block %d [%d] needed resources from the batch %d, overflow resource: %s, batch bytes: %d, L2 block bytes: %d, counters: {batch: %s, used: %s, reserved: %s, needed: %s, high: %s}", + blockResponse.BlockNumber, l2Block.trackingNum, l2Block.batch.batchNumber, overflowResource, l2Block.batch.finalRemainingResources.Bytes, batchL2DataSize, + f.logZKCounters(l2Block.batch.finalRemainingResources.ZKCounters), f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters), f.logZKCounters(neededZKCounters), f.logZKCounters(l2Block.batch.imHighReservedZKCounters)) } - } else { - overflowLog := 
fmt.Sprintf("L2 block %d [%d] reserved resources exceeds the remaining batch %d resources, overflow resource: %s, batch counters: %s, L2 block reserved counters: %s, batch bytes: %d, L2 block bytes: %d", - blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, overflowResource, f.logZKCounters(f.wipBatch.finalRemainingResources.ZKCounters), f.logZKCounters(batchResponse.ReservedZkCounters), f.wipBatch.finalRemainingResources.Bytes, batchL2DataSize) - log.Warnf(overflowLog) + l2Block.batch.finalHighReservedZKCounters = newHighZKCounters + l2Block.highReservedZKCounters = l2Block.batch.finalHighReservedZKCounters + } else { + overflowLog := fmt.Sprintf("L2 block %d [%d] needed resources exceeds the remaining batch %d resources, overflow resource: %s, batch bytes: %d, L2 block bytes: %d, counters: {batch: %s, used: %s, reserved: %s, needed: %s, high: %s}", + blockResponse.BlockNumber, l2Block.trackingNum, l2Block.batch.batchNumber, overflowResource, l2Block.batch.finalRemainingResources.Bytes, batchL2DataSize, + f.logZKCounters(l2Block.batch.finalRemainingResources.ZKCounters), f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters), f.logZKCounters(neededZKCounters), f.logZKCounters(l2Block.batch.imHighReservedZKCounters)) f.LogEvent(ctx, event.Level_Warning, event.EventID_ReservedZKCountersOverflow, overflowLog, nil) + + return fmt.Errorf(overflowLog) } // Update finalStateRoot of the batch to the newStateRoot for the L2 block - f.wipBatch.finalStateRoot = l2Block.batchResponse.NewStateRoot + l2Block.batch.finalStateRoot = l2Block.batchResponse.NewStateRoot f.updateFlushIDs(batchResponse.FlushID, batchResponse.StoredFlushID) + if f.pendingL2BlocksToStoreWG.Count() > 0 { + startWait := time.Now() + f.pendingL2BlocksToStoreWG.Wait() + log.Debugf("waiting for previous L2 block to be stored took: %v", time.Since(startWait)) + } f.addPendingL2BlockToStore(ctx, l2Block) // metrics l2Block.metrics.l2BlockTimes.sequencer = time.Since(processStart) - l2Block.metrics.l2BlockTimes.executor - l2Block.metrics.close(l2Block.createdAt, int64(len(l2Block.transactions))) + if f.cfg.SequentialProcessL2Block { + l2Block.metrics.close(l2Block.createdAt, int64(len(l2Block.transactions)), f.cfg.SequentialProcessL2Block) + } f.metrics.addL2BlockMetrics(l2Block.metrics) - log.Infof("processed L2 block %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, initialStateRoot: %s, newStateRoot: %s, txs: %d/%d, blockHash: %s, infoRoot: %s, used counters: %s, reserved counters: %s", - blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.l1InfoTreeExitRootChanged, initialStateRoot, l2Block.batchResponse.NewStateRoot, + log.Infof("processed L2 block %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, initialStateRoot: %s, newStateRoot: %s, txs: %d/%d, blockHash: %s, infoRoot: %s, counters: {used: %s, reserved: %s, needed: %s, high: %s}, contextId: %s", + blockResponse.BlockNumber, l2Block.trackingNum, l2Block.batch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.l1InfoTreeExitRootChanged, initialStateRoot, l2Block.batchResponse.NewStateRoot, len(l2Block.transactions), len(blockResponse.TransactionResponses), blockResponse.BlockHash, blockResponse.BlockInfoRoot, - 
f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters)) + f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters), f.logZKCounters(neededZKCounters), f.logZKCounters(l2Block.batch.finalHighReservedZKCounters), contextId) if f.cfg.Metrics.EnableLog { log.Infof("metrics-log: {l2block: {num: %d, trackingNum: %d, metrics: {%s}}, interval: {startAt: %d, metrics: {%s}}}", @@ -242,12 +292,12 @@ func (f *finalizer) processL2Block(ctx context.Context, l2Block *L2Block) error } // executeL2Block executes a L2 Block in the executor and returns the batch response from the executor and the batchL2Data size -func (f *finalizer) executeL2Block(ctx context.Context, initialStateRoot common.Hash, l2Block *L2Block) (*state.ProcessBatchResponse, uint64, error) { +func (f *finalizer) executeL2Block(ctx context.Context, initialStateRoot common.Hash, l2Block *L2Block) (*state.ProcessBatchResponse, uint64, string, error) { executeL2BLockError := func(err error) { - log.Errorf("execute L2 block [%d] error %v, batch: %d, initialStateRoot: %s", l2Block.trackingNum, err, f.wipBatch.batchNumber, initialStateRoot) + log.Errorf("execute L2 block [%d] error %v, batch: %d, initialStateRoot: %s", l2Block.trackingNum, err, l2Block.batch.batchNumber, initialStateRoot) // Log batch detailed info for i, tx := range l2Block.transactions { - log.Infof("batch: %d, block: [%d], tx position: %d, tx hash: %s", f.wipBatch.batchNumber, l2Block.trackingNum, i, tx.HashStr) + log.Infof("batch: %d, block: [%d], tx position: %d, tx hash: %s", l2Block.batch.batchNumber, l2Block.trackingNum, i, tx.HashStr) } } @@ -262,7 +312,7 @@ func (f *finalizer) executeL2Block(ctx context.Context, initialStateRoot common. epHex, err := hex.DecodeHex(fmt.Sprintf("%x", tx.EGPPercentage)) if err != nil { log.Errorf("error decoding hex value for effective gas price percentage for tx %s, error: %v", tx.HashStr, err) - return nil, 0, err + return nil, 0, "", err } txData := append(tx.RawTx, epHex...) @@ -271,16 +321,16 @@ func (f *finalizer) executeL2Block(ctx context.Context, initialStateRoot common. } batchRequest := state.ProcessRequest{ - BatchNumber: f.wipBatch.batchNumber, + BatchNumber: l2Block.batch.batchNumber, OldStateRoot: initialStateRoot, - Coinbase: f.wipBatch.coinbase, + Coinbase: l2Block.batch.coinbase, L1InfoRoot_V2: state.GetMockL1InfoRoot(), TimestampLimit_V2: l2Block.timestamp, Transactions: batchL2Data, SkipFirstChangeL2Block_V2: false, SkipWriteBlockInfoRoot_V2: false, Caller: stateMetrics.DiscardCallerLabel, - ForkID: f.stateIntf.GetForkIDByBatchNumber(f.wipBatch.batchNumber), + ForkID: f.stateIntf.GetForkIDByBatchNumber(l2Block.batch.batchNumber), SkipVerifyL1InfoRoot_V2: true, L1InfoTreeData_V2: map[uint32]state.L1DataV2{}, } @@ -290,31 +340,26 @@ func (f *finalizer) executeL2Block(ctx context.Context, initialStateRoot common. 
MinTimestamp: uint64(l2Block.l1InfoTreeExitRoot.GlobalExitRoot.Timestamp.Unix()), } - var ( - err error - batchResponse *state.ProcessBatchResponse - ) - executionStart := time.Now() - batchResponse, err = f.stateIntf.ProcessBatchV2(ctx, batchRequest, true) + batchResponse, contextId, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, true) l2Block.metrics.l2BlockTimes.executor = time.Since(executionStart) if err != nil { executeL2BLockError(err) - return nil, 0, err + return nil, 0, contextId, err } if batchResponse.ExecutorError != nil { executeL2BLockError(batchResponse.ExecutorError) - return nil, 0, ErrExecutorError + return nil, 0, contextId, ErrExecutorError } if batchResponse.IsRomOOCError { - executeL2BLockError(err) - return nil, 0, ErrProcessBatchOOC + executeL2BLockError(batchResponse.RomError_V2) + return nil, 0, contextId, ErrProcessBatchOOC } - return batchResponse, uint64(len(batchL2Data)), nil + return batchResponse, uint64(len(batchL2Data)), contextId, nil } // storeL2Block stores the L2 block in the state and updates the related batch and transactions @@ -331,7 +376,7 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error { // If the L2 block has txs now f.storedFlushID >= l2BlockToStore.flushId, we can store tx blockResponse := l2Block.batchResponse.BlockResponses[0] log.Infof("storing L2 block %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, txs: %d/%d, blockHash: %s, infoRoot: %s", - blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, + blockResponse.BlockNumber, l2Block.trackingNum, l2Block.batch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.l1InfoTreeExitRootChanged, len(l2Block.transactions), len(blockResponse.TransactionResponses), blockResponse.BlockHash, blockResponse.BlockInfoRoot.String()) dbTx, err := f.stateIntf.BeginStateTransaction(ctx) @@ -347,7 +392,24 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error { return retError } - forkID := f.stateIntf.GetForkIDByBatchNumber(f.wipBatch.batchNumber) + if (f.sipBatch == nil) || (f.sipBatch.batchNumber != l2Block.batch.batchNumber) { + // We have l2 blocks to store from a new batch, therefore we insert this new batch in the statedb + // First we need to close the current sipBatch + if f.sipBatch != nil { + err := f.closeSIPBatch(ctx, dbTx) + if err != nil { + return rollbackOnError(fmt.Errorf("error when closing sip batch %d, initialStateRoot: %s, error: %v", f.sipBatch.batchNumber, f.sipBatch.initialStateRoot, err)) + } + } + // We insert new SIP batch in the statedb + err := f.insertSIPBatch(ctx, l2Block.batch.batchNumber, l2Block.batch.initialStateRoot, dbTx) + if err != nil { + return rollbackOnError(fmt.Errorf("error when inserting new sip batch %d, initialStateRoot: %s, error: %v", l2Block.batch.batchNumber, l2Block.batch.initialStateRoot, err)) + } + f.sipBatch = l2Block.batch + } + + forkID := f.stateIntf.GetForkIDByBatchNumber(l2Block.batch.batchNumber) txsEGPLog := []*state.EffectiveGasPriceLog{} for _, tx := range l2Block.transactions { @@ -356,16 +418,16 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error { } // Store L2 block in the state - err = f.stateIntf.StoreL2Block(ctx, f.wipBatch.batchNumber, blockResponse, txsEGPLog, dbTx) + err = f.stateIntf.StoreL2Block(ctx, l2Block.batch.batchNumber, 
blockResponse, txsEGPLog, dbTx) if err != nil { return rollbackOnError(fmt.Errorf("database error on storing L2 block %d [%d], error: %v", blockResponse.BlockNumber, l2Block.trackingNum, err)) } // Now we need to update de BatchL2Data of the wip batch and also update the status of the L2 block txs in the pool - batch, err := f.stateIntf.GetBatchByNumber(ctx, f.wipBatch.batchNumber, dbTx) + batch, err := f.stateIntf.GetBatchByNumber(ctx, l2Block.batch.batchNumber, dbTx) if err != nil { - return rollbackOnError(fmt.Errorf("error when getting batch %d from the state, error: %v", f.wipBatch.batchNumber, err)) + return rollbackOnError(fmt.Errorf("error when getting batch %d from the state, error: %v", l2Block.batch.batchNumber, err)) } // Add changeL2Block to batch.BatchL2Data @@ -384,13 +446,15 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error { batch.BatchL2Data = append(batch.BatchL2Data, blockL2Data...) batch.Resources.SumUp(state.BatchResources{ZKCounters: l2Block.batchResponse.UsedZkCounters, Bytes: uint64(len(blockL2Data))}) + batch.HighReservedZKCounters = l2Block.highReservedZKCounters receipt := state.ProcessingReceipt{ - BatchNumber: f.wipBatch.batchNumber, - StateRoot: l2Block.batchResponse.NewStateRoot, - LocalExitRoot: l2Block.batchResponse.NewLocalExitRoot, - BatchL2Data: batch.BatchL2Data, - BatchResources: batch.Resources, + BatchNumber: l2Block.batch.batchNumber, + StateRoot: l2Block.batchResponse.NewStateRoot, + LocalExitRoot: l2Block.batchResponse.NewLocalExitRoot, + BatchL2Data: batch.BatchL2Data, + BatchResources: batch.Resources, + HighReservedZKCounters: batch.HighReservedZKCounters, } // We need to update the batch GER only in the GER of the block (response) is not zero, since the final GER stored in the batch @@ -403,7 +467,7 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error { err = f.stateIntf.UpdateWIPBatch(ctx, receipt, dbTx) if err != nil { - return rollbackOnError(fmt.Errorf("error when updating wip batch %d, error: %v", f.wipBatch.batchNumber, err)) + return rollbackOnError(fmt.Errorf("error when updating wip batch %d, error: %v", l2Block.batch.batchNumber, err)) } err = dbTx.Commit(ctx) @@ -429,13 +493,13 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error { for _, tx := range l2Block.transactions { // Delete the tx from the pending list in the worker (addrQueue) - f.workerIntf.DeletePendingTxToStore(tx.Hash, tx.From) + f.workerIntf.DeleteTxPendingToStore(tx.Hash, tx.From) } endStoring := time.Now() log.Infof("stored L2 block %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, txs: %d/%d, blockHash: %s, infoRoot: %s, time: %v", - blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, + blockResponse.BlockNumber, l2Block.trackingNum, l2Block.batch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.l1InfoTreeExitRootChanged, len(l2Block.transactions), len(blockResponse.TransactionResponses), blockResponse.BlockHash, blockResponse.BlockInfoRoot.String(), endStoring.Sub(startStoring)) return nil @@ -443,7 +507,7 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error { // finalizeWIPL2Block closes the wip L2 block and opens a new one func (f *finalizer) finalizeWIPL2Block(ctx context.Context) { - log.Debugf("finalizing WIP L2 block [%d]", 
f.wipL2Block.trackingNum) + log.Debugf("finalizing wip L2 block [%d]", f.wipL2Block.trackingNum) prevTimestamp := f.wipL2Block.timestamp prevL1InfoTreeIndex := f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex @@ -455,7 +519,7 @@ func (f *finalizer) finalizeWIPL2Block(ctx context.Context) { // closeWIPL2Block closes the wip L2 block func (f *finalizer) closeWIPL2Block(ctx context.Context) { - log.Debugf("closing WIP L2 block [%d]", f.wipL2Block.trackingNum) + log.Debugf("closing wip L2 block [%d]", f.wipL2Block.trackingNum) f.wipBatch.countOfL2Blocks++ @@ -469,7 +533,46 @@ func (f *finalizer) closeWIPL2Block(ctx context.Context) { // We update imStateRoot (used in tx-by-tx execution) to the finalStateRoot that has been updated after process the WIP L2 Block f.wipBatch.imStateRoot = f.wipBatch.finalStateRoot } else { + if f.pendingL2BlocksToProcessWG.Count() > 0 { + startWait := time.Now() + f.pendingL2BlocksToProcessWG.Wait() + waitTime := time.Since(startWait) + log.Debugf("waiting for previous L2 block to be processed took: %v", waitTime) + f.wipL2Block.metrics.waitl2BlockTime = waitTime + } + f.addPendingL2BlockToProcess(ctx, f.wipL2Block) + + f.wipL2Block.metrics.close(f.wipL2Block.createdAt, int64(len(f.wipL2Block.transactions)), f.cfg.SequentialProcessL2Block) + + l2BlockResourcesUsed := state.BatchResources{} + l2BlockResourcesReserved := state.BatchResources{} + + for _, tx := range f.wipL2Block.transactions { + l2BlockResourcesUsed.ZKCounters.SumUp(tx.UsedZKCounters) + l2BlockResourcesReserved.ZKCounters.SumUp(tx.ReservedZKCounters) + } + l2BlockResourcesUsed.ZKCounters.SumUp(f.wipL2Block.usedZKCountersOnNew) + l2BlockResourcesReserved.ZKCounters.SumUp(f.wipL2Block.reservedZKCountersOnNew) + + log.Infof("closed wip L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, txs: %d, used counters: %s, reserved counters: %s", + f.wipL2Block.trackingNum, f.wipL2Block.batch.batchNumber, f.wipL2Block.deltaTimestamp, f.wipL2Block.timestamp, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, + f.wipL2Block.l1InfoTreeExitRootChanged, len(f.wipL2Block.transactions), f.logZKCounters(l2BlockResourcesUsed.ZKCounters), f.logZKCounters(l2BlockResourcesReserved.ZKCounters)) + + if f.nextStateRootSync.Before(time.Now()) { + log.Debug("sync stateroot time reached") + f.waitPendingL2Blocks() + + // Sanity-check: At this point f.sipBatch should be the same as the batch of the last L2 block processed + // (only if we haven't had a L2 block reorg just in the last block and it's the first one of the wipBatch) + if f.wipBatch.batchNumber != f.sipBatch.batchNumber && !(f.l2BlockReorg.Load() && f.wipBatch.countOfL2Blocks <= 2) { + f.Halt(ctx, fmt.Errorf("wipBatch %d doesn't match sipBatch %d after all pending L2 blocks has been processed/stored", f.wipBatch.batchNumber, f.sipBatch.batchNumber), false) + } + + f.wipBatch.imStateRoot = f.wipBatch.finalStateRoot + f.scheduleNextStateRootSync() + log.Infof("stateroot synced on L2 block [%d] to %s, next sync at %v", f.wipL2Block.trackingNum, f.wipBatch.imStateRoot, f.nextStateRootSync) + } } f.wipL2Block = nil @@ -480,15 +583,15 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp uint64, processStart := time.Now() newL2Block := &L2Block{} - newL2Block.createdAt = time.Now() + now := time.Now() + newL2Block.createdAt = now + newL2Block.deltaTimestamp = uint32(uint64(now.Unix()) - prevTimestamp) + newL2Block.timestamp = prevTimestamp + uint64(newL2Block.deltaTimestamp) // Tracking number 
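
The parallel path above gates the imStateRoot re-anchoring on a deadline: once nextStateRootSync has passed, the finalizer waits for every pending L2 block and then pushes the deadline forward by cfg.StateRootSyncInterval via scheduleNextStateRootSync. The snippet below is a minimal, self-contained sketch of that time-based gate; the syncGate type and its methods are illustrative stand-ins, not names from this change.

```go
package main

import (
	"fmt"
	"time"
)

// syncGate is a hypothetical stand-in for the finalizer's nextStateRootSync
// deadline plus StateRootSyncInterval: work proceeds freely until the
// deadline passes, then a sync is performed and the deadline moves forward.
type syncGate struct {
	interval time.Duration
	nextSync time.Time
}

func newSyncGate(interval time.Duration) *syncGate {
	return &syncGate{interval: interval, nextSync: time.Now().Add(interval)}
}

// due reports whether the sync deadline has been reached.
func (g *syncGate) due() bool {
	return g.nextSync.Before(time.Now())
}

// schedule pushes the deadline one interval forward, mirroring what
// scheduleNextStateRootSync does in the diff above.
func (g *syncGate) schedule() {
	g.nextSync = time.Now().Add(g.interval)
}

func main() {
	gate := newSyncGate(50 * time.Millisecond)

	for block := 1; block <= 5; block++ {
		time.Sleep(20 * time.Millisecond) // stands in for closing one wip L2 block

		if gate.due() {
			// Here the finalizer would wait for all pending L2 blocks to be
			// processed/stored and re-anchor imStateRoot to finalStateRoot.
			fmt.Println("state root sync after block", block)
			gate.schedule()
		}
	}
}
```
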
f.l2BlockCounter++ newL2Block.trackingNum = f.l2BlockCounter - newL2Block.deltaTimestamp = uint32(uint64(now().Unix()) - prevTimestamp) - newL2Block.timestamp = prevTimestamp + uint64(newL2Block.deltaTimestamp) - newL2Block.transactions = []*TxTracker{} f.lastL1InfoTreeMux.Lock() @@ -512,13 +615,13 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp uint64, f.wipL2Block = newL2Block - log.Debugf("creating new WIP L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v", + log.Debugf("creating new wip L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v", f.wipL2Block.trackingNum, f.wipBatch.batchNumber, f.wipL2Block.deltaTimestamp, f.wipL2Block.timestamp, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, f.wipL2Block.l1InfoTreeExitRootChanged) // We process (execute) the new wip L2 block to update the imStateRoot and also get the counters used by the wip l2block - batchResponse, err := f.executeNewWIPL2Block(ctx) + batchResponse, contextId, err := f.executeNewWIPL2Block(ctx) if err != nil { - f.Halt(ctx, fmt.Errorf("failed to execute new WIP L2 block [%d], error: %v ", f.wipL2Block.trackingNum, err), false) + f.Halt(ctx, fmt.Errorf("failed to execute new wip L2 block [%d], error: %v ", f.wipL2Block.trackingNum, err), false) } if len(batchResponse.BlockResponses) != 1 { @@ -532,23 +635,28 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp uint64, // Save the resources used/reserved and subtract the ZKCounters reserved by the new WIP L2 block from the WIP batch // We need to increase the poseidon hashes to reserve in the batch the hashes needed to write the L1InfoRoot when processing the final L2 Block (SkipWriteBlockInfoRoot_V2=false) - f.wipL2Block.usedZKCounters = batchResponse.UsedZkCounters - f.wipL2Block.usedZKCounters.PoseidonHashes = (batchResponse.UsedZkCounters.PoseidonHashes * 2) + 2 // nolint:gomnd - f.wipL2Block.reservedZKCounters = batchResponse.ReservedZkCounters - f.wipL2Block.reservedZKCounters.PoseidonHashes = (batchResponse.ReservedZkCounters.PoseidonHashes * 2) + 2 // nolint:gomnd + f.wipL2Block.usedZKCountersOnNew = batchResponse.UsedZkCounters + f.wipL2Block.usedZKCountersOnNew.PoseidonHashes = (batchResponse.UsedZkCounters.PoseidonHashes * 2) + 2 // nolint:gomnd + f.wipL2Block.reservedZKCountersOnNew = batchResponse.ReservedZkCounters + f.wipL2Block.reservedZKCountersOnNew.PoseidonHashes = (batchResponse.ReservedZkCounters.PoseidonHashes * 2) + 2 // nolint:gomnd f.wipL2Block.bytes = changeL2BlockSize + neededZKCounters, newHighZKCounters := getNeededZKCounters(f.wipBatch.imHighReservedZKCounters, f.wipL2Block.usedZKCountersOnNew, f.wipL2Block.reservedZKCountersOnNew) subOverflow := false - fits, overflowResource := f.wipBatch.imRemainingResources.Fits(state.BatchResources{ZKCounters: f.wipL2Block.reservedZKCounters, Bytes: f.wipL2Block.bytes}) + fits, overflowResource := f.wipBatch.imRemainingResources.Fits(state.BatchResources{ZKCounters: neededZKCounters, Bytes: f.wipL2Block.bytes}) if fits { - subOverflow, overflowResource = f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: f.wipL2Block.usedZKCounters, Bytes: f.wipL2Block.bytes}) + subOverflow, overflowResource = f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: f.wipL2Block.usedZKCountersOnNew, Bytes: f.wipL2Block.bytes}) if subOverflow { // Sanity check, this cannot happen as reservedZKCounters should be >= that 
usedZKCounters - log.Infof("new WIP L2 block [%d] used resources exceeds the remaining batch resources, overflow resource: %s, closing WIP batch and creating new one. Batch counters: %s, L2 block used counters: %s", - f.wipL2Block.trackingNum, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(f.wipL2Block.usedZKCounters)) + log.Infof("new wip L2 block [%d] used resources exceeds the remaining batch resources, overflow resource: %s, closing WIP batch and creating new one. counters: {batch: %s, used: %s, reserved: %s, needed: %s, high: %s}", + f.wipL2Block.trackingNum, overflowResource, + f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(f.wipL2Block.usedZKCountersOnNew), f.logZKCounters(f.wipL2Block.reservedZKCountersOnNew), f.logZKCounters(neededZKCounters), f.logZKCounters(f.wipBatch.imHighReservedZKCounters)) } + + f.wipBatch.imHighReservedZKCounters = newHighZKCounters } else { - log.Infof("new WIP L2 block [%d] reserved resources exceeds the remaining batch resources, overflow resource: %s, closing WIP batch and creating new one. Batch counters: %s, L2 block reserved counters: %s", - f.wipL2Block.trackingNum, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(f.wipL2Block.reservedZKCounters)) + log.Infof("new wip L2 block [%d] reserved resources exceeds the remaining batch resources, overflow resource: %s, closing WIP batch and creating new one. counters: {batch: %s, used: %s, reserved: %s, needed: %s, high: %s}", + f.wipL2Block.trackingNum, overflowResource, + f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(f.wipL2Block.usedZKCountersOnNew), f.logZKCounters(f.wipL2Block.reservedZKCountersOnNew), f.logZKCounters(neededZKCounters), f.logZKCounters(f.wipBatch.imHighReservedZKCounters)) } // If reserved WIP L2 block resources don't fit in the remaining batch resources (or we got an overflow when trying to subtract the used resources) @@ -556,19 +664,22 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp uint64, if !fits || subOverflow { err := f.closeAndOpenNewWIPBatch(ctx, state.ResourceExhaustedClosingReason) if err != nil { - f.Halt(ctx, fmt.Errorf("failed to create new WIP batch [%d], error: %v", f.wipL2Block.trackingNum, err), true) + f.Halt(ctx, fmt.Errorf("failed to create new wip batch [%d], error: %v", f.wipL2Block.trackingNum, err), true) } } + // We assign the wipBatch as the batch where this wipL2Block belongs + f.wipL2Block.batch = f.wipBatch + f.wipL2Block.metrics.newL2BlockTimes.sequencer = time.Since(processStart) - f.wipL2Block.metrics.newL2BlockTimes.executor - log.Infof("created new WIP L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, oldStateRoot: %s, imStateRoot: %s, used counters: %s, reserved counters: %s", - f.wipL2Block.trackingNum, f.wipBatch.batchNumber, f.wipL2Block.deltaTimestamp, f.wipL2Block.timestamp, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, - f.wipL2Block.l1InfoTreeExitRootChanged, oldIMStateRoot, f.wipL2Block.imStateRoot, f.logZKCounters(f.wipL2Block.usedZKCounters), f.logZKCounters(f.wipL2Block.reservedZKCounters)) + log.Infof("created new wip L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, oldStateRoot: %s, imStateRoot: %s, counters: {used: %s, reserved: %s, needed: %s, high: %s}, contextId: %s", + f.wipL2Block.trackingNum, f.wipBatch.batchNumber, 
f.wipL2Block.deltaTimestamp, f.wipL2Block.timestamp, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, f.wipL2Block.l1InfoTreeExitRootChanged, oldIMStateRoot, f.wipL2Block.imStateRoot, + f.logZKCounters(f.wipL2Block.usedZKCountersOnNew), f.logZKCounters(f.wipL2Block.usedZKCountersOnNew), f.logZKCounters(f.wipL2Block.reservedZKCountersOnNew), f.logZKCounters(f.wipBatch.imHighReservedZKCounters), contextId) } // executeNewWIPL2Block executes an empty L2 Block in the executor and returns the batch response from the executor -func (f *finalizer) executeNewWIPL2Block(ctx context.Context) (*state.ProcessBatchResponse, error) { +func (f *finalizer) executeNewWIPL2Block(ctx context.Context) (*state.ProcessBatchResponse, string, error) { batchRequest := state.ProcessRequest{ BatchNumber: f.wipBatch.batchNumber, OldStateRoot: f.wipBatch.imStateRoot, @@ -591,22 +702,38 @@ func (f *finalizer) executeNewWIPL2Block(ctx context.Context) (*state.ProcessBat } executorTime := time.Now() - batchResponse, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, false) + batchResponse, contextId, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, false) f.wipL2Block.metrics.newL2BlockTimes.executor = time.Since(executorTime) if err != nil { - return nil, err + return nil, contextId, err } if batchResponse.ExecutorError != nil { - return nil, ErrExecutorError + return nil, contextId, ErrExecutorError } if batchResponse.IsRomOOCError { - return nil, ErrProcessBatchOOC + return nil, contextId, ErrProcessBatchOOC } - return batchResponse, nil + return batchResponse, contextId, nil +} + +func (f *finalizer) scheduleNextStateRootSync() { + f.nextStateRootSync = time.Now().Add(f.cfg.StateRootSyncInterval.Duration) +} + +func (f *finalizer) waitPendingL2Blocks() { + // Wait until all L2 blocks are processed/discarded + startWait := time.Now() + f.pendingL2BlocksToProcessWG.Wait() + log.Debugf("waiting for pending L2 blocks to be processed took: %v", time.Since(startWait)) + + // Wait until all L2 blocks are stored + startWait = time.Now() + f.pendingL2BlocksToStoreWG.Wait() + log.Debugf("waiting for pending L2 blocks to be stored took: %v", time.Since(startWait)) } func (f *finalizer) dumpL2Block(l2Block *L2Block) { @@ -619,12 +746,12 @@ func (f *finalizer) dumpL2Block(l2Block *L2Block) { sLog := "" for i, tx := range l2Block.transactions { - sLog += fmt.Sprintf(" tx[%d] hash: %s, from: %s, nonce: %d, gas: %d, gasPrice: %d, bytes: %d, egpPct: %d, used counters: %s, reserved counters: %s\n", + sLog += fmt.Sprintf(" tx[%d] hash: %s, from: %s, nonce: %d, gas: %d, gasPrice: %d, bytes: %d, egpPct: %d, countersOnNew: {used: %s, reserved: %s}\n", i, tx.HashStr, tx.FromStr, tx.Nonce, tx.Gas, tx.GasPrice, tx.Bytes, tx.EGPPercentage, f.logZKCounters(tx.UsedZKCounters), f.logZKCounters(tx.ReservedZKCounters)) } - log.Infof("DUMP L2 block [%d], timestamp: %d, deltaTimestamp: %d, imStateRoot: %s, l1InfoTreeIndex: %d, bytes: %d, used counters: %s, reserved counters: %s\n%s", + log.Infof("dump L2 block [%d], timestamp: %d, deltaTimestamp: %d, imStateRoot: %s, l1InfoTreeIndex: %d, bytes: %d, used counters: %s, reserved counters: %s\n%s", l2Block.trackingNum, l2Block.timestamp, l2Block.deltaTimestamp, l2Block.imStateRoot, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.bytes, - f.logZKCounters(l2Block.usedZKCounters), f.logZKCounters(l2Block.reservedZKCounters), sLog) + f.logZKCounters(l2Block.usedZKCountersOnNew), f.logZKCounters(l2Block.reservedZKCountersOnNew), sLog) sLog = "" if blockResp != nil { @@ -634,7 +761,7 @@ func (f 
*finalizer) dumpL2Block(l2Block *L2Block) { txResp.EffectivePercentage, txResp.HasGaspriceOpcode, txResp.HasBalanceOpcode) } - log.Infof("DUMP L2 block %d [%d] response, timestamp: %d, parentHash: %s, coinbase: %s, ger: %s, blockHashL1: %s, gasUsed: %d, blockInfoRoot: %s, blockHash: %s, used counters: %s, reserved counters: %s\n%s", + log.Infof("dump L2 block %d [%d] response, timestamp: %d, parentHash: %s, coinbase: %s, ger: %s, blockHashL1: %s, gasUsed: %d, blockInfoRoot: %s, blockHash: %s, counters: {used: %s, reserved: %s}\n%s", blockResp.BlockNumber, l2Block.trackingNum, blockResp.Timestamp, blockResp.ParentHash, blockResp.Coinbase, blockResp.GlobalExitRoot, blockResp.BlockHashL1, blockResp.GasUsed, blockResp.BlockInfoRoot, blockResp.BlockHash, f.logZKCounters(l2Block.batchResponse.UsedZkCounters), f.logZKCounters(l2Block.batchResponse.ReservedZkCounters), sLog) } diff --git a/sequencer/metrics.go b/sequencer/metrics.go index 2be977e23e..5481587399 100644 --- a/sequencer/metrics.go +++ b/sequencer/metrics.go @@ -6,6 +6,7 @@ import ( "time" ) +// SEQUENTIAL L2 BLOCK PROCESSING // |-----------------------------------------------------------------------------| -> totalTime // |------------| |-------------------------| -> transactionsTime // |-newL2Block-|----tx 1----| |---tx 2---|-----tx 3-----| |-----l2Block-----| @@ -14,6 +15,20 @@ import ( // idle | |iiii| | |ii| | -> idleTime // +// PARALLEL L2 BLOCK PROCESSING +// |---------------------------------------------------------------------------------------------| -> totalTime +// |-----------------------L2 block 1-----------------------| |-----------L2 block 2------------| +// |------------| |-------------------------| |--------------------| -> transactionsTime +// |-newL2Block-|----tx 1----| |---tx 2---|-----tx 3-----| |-newL2Block-|--tx 4---|---tx 5---| +// sequencer |sssss ss|sss ss| |sss ss|sss ss| |sssss ss|ss ss|sss ss| -> sequencerTime +// executor | xxxxx | xxxxxxx | | xxxxx | xxxxxxxxx | | xxxxx | xxxxxx | xxxxx | -> executorTime +// idle | |iiii| | |ii| | -> idleTime + +// | -> L2 block 1 | +// seq-l2block | |ssss ss| +// exe-l2block | | xxxxxxxxxxx | +// + type processTimes struct { sequencer time.Duration executor time.Duration @@ -41,9 +56,11 @@ type metrics struct { newL2BlockTimes processTimes transactionsTimes processTimes l2BlockTimes processTimes + waitl2BlockTime time.Duration gas uint64 estimatedTxsPerSec float64 estimatedGasPerSec uint64 + sequential bool } func (m *metrics) sub(mSub metrics) { @@ -53,6 +70,7 @@ func (m *metrics) sub(mSub metrics) { m.newL2BlockTimes.sub(mSub.newL2BlockTimes) m.transactionsTimes.sub(mSub.transactionsTimes) m.l2BlockTimes.sub(mSub.l2BlockTimes) + m.waitl2BlockTime -= mSub.waitl2BlockTime m.gas -= mSub.gas } @@ -63,32 +81,57 @@ func (m *metrics) sumUp(mSumUp metrics) { m.newL2BlockTimes.sumUp(mSumUp.newL2BlockTimes) m.transactionsTimes.sumUp(mSumUp.transactionsTimes) m.l2BlockTimes.sumUp(mSumUp.l2BlockTimes) + m.waitl2BlockTime += mSumUp.waitl2BlockTime m.gas += mSumUp.gas } func (m *metrics) executorTime() time.Duration { - return m.newL2BlockTimes.executor + m.transactionsTimes.executor + m.l2BlockTimes.executor + if m.sequential { + return m.newL2BlockTimes.executor + m.transactionsTimes.executor + m.l2BlockTimes.executor + } else { + return m.newL2BlockTimes.executor + m.transactionsTimes.executor + m.waitl2BlockTime + } } func (m *metrics) sequencerTime() time.Duration { - return m.newL2BlockTimes.sequencer + m.transactionsTimes.sequencer + m.l2BlockTimes.sequencer + if 
m.sequential { + return m.newL2BlockTimes.sequencer + m.transactionsTimes.sequencer + m.l2BlockTimes.sequencer + } else { + return m.newL2BlockTimes.sequencer + m.transactionsTimes.sequencer + } } func (m *metrics) totalTime() time.Duration { - return m.newL2BlockTimes.total() + m.transactionsTimes.total() + m.l2BlockTimes.total() + m.idleTime + if m.sequential { + return m.newL2BlockTimes.total() + m.transactionsTimes.total() + m.l2BlockTimes.total() + m.idleTime + } else { + return m.newL2BlockTimes.total() + m.transactionsTimes.total() + m.waitl2BlockTime + m.idleTime + } } -func (m *metrics) close(createdAt time.Time, l2BlockTxsCount int64) { +func (m *metrics) close(createdAt time.Time, l2BlockTxsCount int64, sequential bool) { // Compute pending fields m.closedAt = time.Now() totalTime := time.Since(createdAt) + m.sequential = sequential m.l2BlockTxsCount = l2BlockTxsCount - m.transactionsTimes.sequencer = totalTime - m.idleTime - m.newL2BlockTimes.total() - m.transactionsTimes.executor - m.l2BlockTimes.total() + + if m.sequential { + m.transactionsTimes.sequencer = totalTime - m.idleTime - m.newL2BlockTimes.total() - m.transactionsTimes.executor - m.l2BlockTimes.total() + } else { + m.transactionsTimes.sequencer = totalTime - m.idleTime - m.newL2BlockTimes.total() - m.transactionsTimes.executor - m.waitl2BlockTime + } // Compute performance if m.processedTxsCount > 0 { - // timePerTxuS is the average time spent per tx. This includes the l2Block time since the processing time of this section is proportional to the number of txs - timePerTxuS := (m.transactionsTimes.total() + m.l2BlockTimes.total()).Microseconds() / m.processedTxsCount + var timePerTxuS int64 + if m.sequential { + // timePerTxuS is the average time spent per tx. This includes the l2Block time since the processing time of this section is proportional to the number of txs + timePerTxuS = (m.transactionsTimes.total() + m.l2BlockTimes.total()).Microseconds() / m.processedTxsCount + } else { + // timePerTxuS is the average time spent per tx. 
This includes the waitl2Block + timePerTxuS = (m.transactionsTimes.total() + m.waitl2BlockTime).Microseconds() / m.processedTxsCount + } // estimatedTxs is the number of transactions that we estimate could have been processed in the block estimatedTxs := float64(totalTime.Microseconds()-m.newL2BlockTimes.total().Microseconds()) / float64(timePerTxuS) // estimatedTxxPerSec is the estimated transactions per second (rounded to 2 decimal digits) @@ -102,8 +145,8 @@ func (m *metrics) close(createdAt time.Time, l2BlockTxsCount int64) { } func (m *metrics) log() string { - return fmt.Sprintf("blockTxs: %d, txs: %d, gas: %d, txsSec: %.2f, gasSec: %d, time: {total: %d, idle: %d, sequencer: {total: %d, newL2Block: %d, txs: %d, l2Block: %d}, executor: {total: %d, newL2Block: %d, txs: %d, l2Block: %d}", - m.l2BlockTxsCount, m.processedTxsCount, m.gas, m.estimatedTxsPerSec, m.estimatedGasPerSec, m.totalTime().Microseconds(), m.idleTime.Microseconds(), + return fmt.Sprintf("blockTxs: %d, txs: %d, gas: %d, txsSec: %.2f, gasSec: %d, time: {total: %d, idle: %d, waitL2Block: %d, sequencer: {total: %d, newL2Block: %d, txs: %d, l2Block: %d}, executor: {total: %d, newL2Block: %d, txs: %d, l2Block: %d}", + m.l2BlockTxsCount, m.processedTxsCount, m.gas, m.estimatedTxsPerSec, m.estimatedGasPerSec, m.totalTime().Microseconds(), m.idleTime.Microseconds(), m.waitl2BlockTime.Microseconds(), m.sequencerTime().Microseconds(), m.newL2BlockTimes.sequencer.Microseconds(), m.transactionsTimes.sequencer.Microseconds(), m.l2BlockTimes.sequencer.Microseconds(), m.executorTime().Microseconds(), m.newL2BlockTimes.executor.Microseconds(), m.transactionsTimes.executor.Microseconds(), m.l2BlockTimes.executor.Microseconds()) } diff --git a/sequencer/mock_state.go b/sequencer/mock_state.go index 2a53e28c25..f7f3861d4b 100644 --- a/sequencer/mock_state.go +++ b/sequencer/mock_state.go @@ -982,7 +982,7 @@ func (_m *StateMock) OpenWIPBatch(ctx context.Context, batch state.Batch, dbTx p } // ProcessBatchV2 provides a mock function with given fields: ctx, request, updateMerkleTree -func (_m *StateMock) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) { +func (_m *StateMock) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, string, error) { ret := _m.Called(ctx, request, updateMerkleTree) if len(ret) == 0 { @@ -990,8 +990,9 @@ func (_m *StateMock) ProcessBatchV2(ctx context.Context, request state.ProcessRe } var r0 *state.ProcessBatchResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, error)); ok { + var r1 string + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, string, error)); ok { return rf(ctx, request, updateMerkleTree) } if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) *state.ProcessBatchResponse); ok { @@ -1002,13 +1003,19 @@ func (_m *StateMock) ProcessBatchV2(ctx context.Context, request state.ProcessRe } } - if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) string); ok { r1 = rf(ctx, request, updateMerkleTree) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(string) } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context, state.ProcessRequest, bool) error); ok { + r2 = rf(ctx, request, 
updateMerkleTree) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // StoreL2Block provides a mock function with given fields: ctx, batchNumber, l2Block, txsEGPLog, dbTx diff --git a/sequencer/mock_worker.go b/sequencer/mock_worker.go index 215cd08c8e..3ff546f724 100644 --- a/sequencer/mock_worker.go +++ b/sequencer/mock_worker.go @@ -25,11 +25,6 @@ func (_m *WorkerMock) AddForcedTx(txHash common.Hash, addr common.Address) { _m.Called(txHash, addr) } -// AddPendingTxToStore provides a mock function with given fields: txHash, addr -func (_m *WorkerMock) AddPendingTxToStore(txHash common.Hash, addr common.Address) { - _m.Called(txHash, addr) -} - // AddTxTracker provides a mock function with given fields: ctx, txTracker func (_m *WorkerMock) AddTxTracker(ctx context.Context, txTracker *TxTracker) (*TxTracker, error) { ret := _m.Called(ctx, txTracker) @@ -65,19 +60,19 @@ func (_m *WorkerMock) DeleteForcedTx(txHash common.Hash, addr common.Address) { _m.Called(txHash, addr) } -// DeletePendingTxToStore provides a mock function with given fields: txHash, addr -func (_m *WorkerMock) DeletePendingTxToStore(txHash common.Hash, addr common.Address) { - _m.Called(txHash, addr) -} - // DeleteTx provides a mock function with given fields: txHash, from func (_m *WorkerMock) DeleteTx(txHash common.Hash, from common.Address) { _m.Called(txHash, from) } -// GetBestFittingTx provides a mock function with given fields: resources -func (_m *WorkerMock) GetBestFittingTx(resources state.BatchResources) (*TxTracker, error) { - ret := _m.Called(resources) +// DeleteTxPendingToStore provides a mock function with given fields: txHash, addr +func (_m *WorkerMock) DeleteTxPendingToStore(txHash common.Hash, addr common.Address) { + _m.Called(txHash, addr) +} + +// GetBestFittingTx provides a mock function with given fields: remainingResources, highReservedCounters +func (_m *WorkerMock) GetBestFittingTx(remainingResources state.BatchResources, highReservedCounters state.ZKCounters) (*TxTracker, error) { + ret := _m.Called(remainingResources, highReservedCounters) if len(ret) == 0 { panic("no return value specified for GetBestFittingTx") @@ -85,19 +80,19 @@ func (_m *WorkerMock) GetBestFittingTx(resources state.BatchResources) (*TxTrack var r0 *TxTracker var r1 error - if rf, ok := ret.Get(0).(func(state.BatchResources) (*TxTracker, error)); ok { - return rf(resources) + if rf, ok := ret.Get(0).(func(state.BatchResources, state.ZKCounters) (*TxTracker, error)); ok { + return rf(remainingResources, highReservedCounters) } - if rf, ok := ret.Get(0).(func(state.BatchResources) *TxTracker); ok { - r0 = rf(resources) + if rf, ok := ret.Get(0).(func(state.BatchResources, state.ZKCounters) *TxTracker); ok { + r0 = rf(remainingResources, highReservedCounters) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*TxTracker) } } - if rf, ok := ret.Get(1).(func(state.BatchResources) error); ok { - r1 = rf(resources) + if rf, ok := ret.Get(1).(func(state.BatchResources, state.ZKCounters) error); ok { + r1 = rf(remainingResources, highReservedCounters) } else { r1 = ret.Error(1) } @@ -105,6 +100,11 @@ func (_m *WorkerMock) GetBestFittingTx(resources state.BatchResources) (*TxTrack return r0, r1 } +// MoveTxPendingToStore provides a mock function with given fields: txHash, addr +func (_m *WorkerMock) MoveTxPendingToStore(txHash common.Hash, addr common.Address) { + _m.Called(txHash, addr) +} + // MoveTxToNotReady provides a mock function with given fields: txHash, from, actualNonce, actualBalance func (_m *WorkerMock) 
MoveTxToNotReady(txHash common.Hash, from common.Address, actualNonce *uint64, actualBalance *big.Int) []*TxTracker { ret := _m.Called(txHash, from, actualNonce, actualBalance) @@ -155,6 +155,38 @@ func (_m *WorkerMock) NewTxTracker(tx types.Transaction, usedZKcounters state.ZK return r0, r1 } +// RestoreTxsPendingToStore provides a mock function with given fields: ctx +func (_m *WorkerMock) RestoreTxsPendingToStore(ctx context.Context) ([]*TxTracker, []*TxTracker) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for RestoreTxsPendingToStore") + } + + var r0 []*TxTracker + var r1 []*TxTracker + if rf, ok := ret.Get(0).(func(context.Context) ([]*TxTracker, []*TxTracker)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []*TxTracker); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*TxTracker) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) []*TxTracker); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]*TxTracker) + } + } + + return r0, r1 +} + // UpdateAfterSingleSuccessfulTxExecution provides a mock function with given fields: from, touchedAddresses func (_m *WorkerMock) UpdateAfterSingleSuccessfulTxExecution(from common.Address, touchedAddresses map[common.Address]*state.InfoReadWrite) []*TxTracker { ret := _m.Called(from, touchedAddresses) diff --git a/sequencer/sequencer.go b/sequencer/sequencer.go index 64b5711fae..f0859414a8 100644 --- a/sequencer/sequencer.go +++ b/sequencer/sequencer.go @@ -75,7 +75,7 @@ func (s *Sequencer) Start(ctx context.Context) { err := s.pool.MarkWIPTxsAsPending(ctx) if err != nil { - log.Fatalf("failed to mark WIP txs as pending, error: %v", err) + log.Fatalf("failed to mark wip txs as pending, error: %v", err) } // Start stream server if enabled @@ -93,8 +93,6 @@ func (s *Sequencer) Start(ctx context.Context) { s.updateDataStreamerFile(ctx, s.cfg.StreamServer.ChainID) } - go s.loadFromPool(ctx) - if s.streamServer != nil { go s.sendDataToStreamer(s.cfg.StreamServer.ChainID) } @@ -104,6 +102,8 @@ func (s *Sequencer) Start(ctx context.Context) { s.finalizer = newFinalizer(s.cfg.Finalizer, s.poolCfg, s.worker, s.pool, s.stateIntf, s.etherman, s.address, s.isSynced, s.batchCfg.Constraints, s.eventLog, s.streamServer, s.workerReadyTxsCond, s.dataToStream) go s.finalizer.Start(ctx) + go s.loadFromPool(ctx) + go s.deleteOldPoolTxs(ctx) go s.expireOldWorkerTxs(ctx) @@ -147,6 +147,11 @@ func (s *Sequencer) updateDataStreamerFile(ctx context.Context, chainID uint64) func (s *Sequencer) deleteOldPoolTxs(ctx context.Context) { for { time.Sleep(s.cfg.DeletePoolTxsCheckInterval.Duration) + + if s.finalizer.haltFinalizer.Load() { + return + } + log.Infof("trying to get txs to delete from the pool...") earliestTxHash, err := s.pool.GetEarliestProcessedTx(ctx) if err != nil { @@ -181,6 +186,11 @@ func (s *Sequencer) deleteOldPoolTxs(ctx context.Context) { func (s *Sequencer) expireOldWorkerTxs(ctx context.Context) { for { time.Sleep(s.cfg.TxLifetimeCheckInterval.Duration) + + if s.finalizer.haltFinalizer.Load() { + return + } + txTrackers := s.worker.ExpireTransactions(s.cfg.TxLifetimeMax.Duration) failedReason := ErrExpiredTransaction.Error() for _, txTracker := range txTrackers { @@ -195,6 +205,10 @@ func (s *Sequencer) expireOldWorkerTxs(ctx context.Context) { // loadFromPool keeps loading transactions from the pool func (s *Sequencer) loadFromPool(ctx context.Context) { for { + if s.finalizer.haltFinalizer.Load() { + return + } + 
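
The sequencer.go hunks just above add the same guard to each background loop: the goroutine sleeps for its check interval and returns as soon as the finalizer's halt flag is set. The sketch below reproduces that shutdown pattern in isolation, assuming the flag behaves like an atomic.Bool (consistent with the .Load() call in the diff); all other names are illustrative.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// backgroundLoop polls a shared halt flag and returns as soon as it is set,
// so background goroutines stop doing work once the finalizer has halted.
// The names here are illustrative; in the diff the flag is
// s.finalizer.haltFinalizer, read with Load() at the top of each loop.
func backgroundLoop(name string, halt *atomic.Bool, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		time.Sleep(10 * time.Millisecond) // stands in for DeletePoolTxsCheckInterval, etc.

		if halt.Load() {
			fmt.Println(name, "stopping: finalizer halted")
			return
		}

		// ... periodic work (delete old pool txs, expire worker txs, load from pool)
	}
}

func main() {
	var halt atomic.Bool
	var wg sync.WaitGroup

	for _, name := range []string{"deleteOldPoolTxs", "expireOldWorkerTxs", "loadFromPool"} {
		wg.Add(1)
		go backgroundLoop(name, &halt, &wg)
	}

	time.Sleep(30 * time.Millisecond)
	halt.Store(true) // what a halt on an unrecoverable error would do
	wg.Wait()
}
```
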
poolTransactions, err := s.pool.GetNonWIPPendingTxs(ctx) if err != nil && err != pool.ErrNotFound { log.Errorf("error loading txs from pool, error: %v", err) diff --git a/sequencer/waitgroupcount.go b/sequencer/waitgroupcount.go new file mode 100644 index 0000000000..436f088514 --- /dev/null +++ b/sequencer/waitgroupcount.go @@ -0,0 +1,29 @@ +package sequencer + +import ( + "sync" + "sync/atomic" +) + +// WaitGroupCount implements a sync.WaitGroup that also has a field to get the WaitGroup counter +type WaitGroupCount struct { + sync.WaitGroup + count atomic.Int32 +} + +// Add adds delta to the WaitGroup and increase the counter +func (wg *WaitGroupCount) Add(delta int) { + wg.count.Add(int32(delta)) + wg.WaitGroup.Add(delta) +} + +// Done decrements the WaitGroup and counter by one +func (wg *WaitGroupCount) Done() { + wg.count.Add(-1) + wg.WaitGroup.Done() +} + +// Count returns the counter of the WaitGroup +func (wg *WaitGroupCount) Count() int { + return int(wg.count.Load()) +} diff --git a/sequencer/worker.go b/sequencer/worker.go index 0d0b378872..c6be5ed5ab 100644 --- a/sequencer/worker.go +++ b/sequencer/worker.go @@ -19,7 +19,9 @@ import ( type Worker struct { pool map[string]*addrQueue txSortedList *txSortedList - workerMutex sync.Mutex + pendingToStore []*TxTracker + reorgedTxs []*TxTracker + workerMutex *sync.Mutex state stateInterface batchConstraints state.BatchConstraintsCfg readyTxsCond *timeoutCond @@ -30,7 +32,9 @@ type Worker struct { func NewWorker(state stateInterface, constraints state.BatchConstraintsCfg, readyTxsCond *timeoutCond) *Worker { w := Worker{ pool: make(map[string]*addrQueue), + workerMutex: new(sync.Mutex), txSortedList: newTxSortedList(), + pendingToStore: []*TxTracker{}, state: state, batchConstraints: constraints, readyTxsCond: readyTxsCond, @@ -46,31 +50,36 @@ func (w *Worker) NewTxTracker(tx types.Transaction, usedZKCounters state.ZKCount // AddTxTracker adds a new Tx to the Worker func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *TxTracker, dropReason error) { - w.workerMutex.Lock() + return w.addTxTracker(ctx, tx, w.workerMutex) +} + +// addTxTracker adds a new Tx to the Worker +func (w *Worker) addTxTracker(ctx context.Context, tx *TxTracker, mutex *sync.Mutex) (replacedTx *TxTracker, dropReason error) { + mutexLock(mutex) // Make sure the IP is valid. if tx.IP != "" && !pool.IsValidIP(tx.IP) { - w.workerMutex.Unlock() + mutexUnlock(mutex) return nil, pool.ErrInvalidIP } // Make sure the transaction's reserved ZKCounters are within the constraints. 
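
The new sequencer/waitgroupcount.go above is a sync.WaitGroup that also exposes its counter, which is what lets closeWIPL2Block check pendingL2BlocksToProcessWG.Count() > 0 and only block when a previous L2 block is still in flight. Below is a small usage sketch: the WaitGroupCount type is copied from the diff, while the producer loop and timings are invented for illustration.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// WaitGroupCount is the helper added in sequencer/waitgroupcount.go: a
// sync.WaitGroup that also exposes its current counter.
type WaitGroupCount struct {
	sync.WaitGroup
	count atomic.Int32
}

// Add adds delta to the WaitGroup and increases the counter.
func (wg *WaitGroupCount) Add(delta int) {
	wg.count.Add(int32(delta))
	wg.WaitGroup.Add(delta)
}

// Done decrements the WaitGroup and the counter by one.
func (wg *WaitGroupCount) Done() {
	wg.count.Add(-1)
	wg.WaitGroup.Done()
}

// Count returns the current counter of the WaitGroup.
func (wg *WaitGroupCount) Count() int {
	return int(wg.count.Load())
}

func main() {
	var pending WaitGroupCount

	process := func(block int) {
		defer pending.Done()
		time.Sleep(15 * time.Millisecond) // stand-in for processing the L2 block
		fmt.Println("processed block", block)
	}

	for block := 1; block <= 3; block++ {
		// Mirror of closeWIPL2Block: block only if a previous L2 block is
		// still in flight, and record how long the wait took.
		if pending.Count() > 0 {
			start := time.Now()
			pending.Wait()
			fmt.Printf("waited %v for the previous block\n", time.Since(start))
		}
		pending.Add(1)
		go process(block)
	}

	pending.Wait()
}
```
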
if !w.batchConstraints.IsWithinConstraints(tx.ReservedZKCounters) { log.Errorf("outOfCounters error (node level) for tx %s", tx.Hash.String()) - w.workerMutex.Unlock() + mutexUnlock(mutex) return nil, pool.ErrOutOfCounters } if (w.wipTx != nil) && (w.wipTx.FromStr == tx.FromStr) && (w.wipTx.Nonce == tx.Nonce) { log.Infof("adding tx %s (nonce %d) from address %s that matches current processing tx %s (nonce %d), rejecting it as duplicated nonce", tx.Hash, tx.Nonce, tx.From, w.wipTx.Hash, w.wipTx.Nonce) - w.workerMutex.Unlock() + mutexUnlock(mutex) return nil, ErrDuplicatedNonce } addr, found := w.pool[tx.FromStr] if !found { // Unlock the worker to let execute other worker functions while creating the new AddrQueue - w.workerMutex.Unlock() + mutexUnlock(mutex) root, err := w.state.GetLastStateRoot(ctx, nil) if err != nil { @@ -94,7 +103,7 @@ func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *T addr = newAddrQueue(tx.From, nonce.Uint64(), balance) // Lock again the worker - w.workerMutex.Lock() + mutexLock(mutex) w.pool[tx.FromStr] = addr log.Debugf("new addrQueue %s created (nonce: %d, balance: %s)", tx.FromStr, nonce.Uint64(), balance.String()) @@ -106,7 +115,7 @@ func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *T newReadyTx, prevReadyTx, repTx, dropReason = addr.addTx(tx) if dropReason != nil { log.Infof("dropped tx %s from addrQueue %s, reason: %s", tx.HashStr, tx.FromStr, dropReason.Error()) - w.workerMutex.Unlock() + mutexUnlock(mutex) return repTx, dropReason } @@ -124,7 +133,7 @@ func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *T log.Debugf("tx %s (nonce: %d, gasPrice: %d, addr: %s) has been replaced", repTx.HashStr, repTx.Nonce, repTx.GasPrice, tx.FromStr) } - w.workerMutex.Unlock() + mutexUnlock(mutex) return repTx, nil } @@ -199,6 +208,28 @@ func (w *Worker) MoveTxToNotReady(txHash common.Hash, from common.Address, actua return txsToDelete } +// deleteTx deletes a regular tx from the addrQueue +func (w *Worker) deleteTx(txHash common.Hash, addr common.Address) *TxTracker { + addrQueue, found := w.pool[addr.String()] + if found { + deletedTx, isReady := addrQueue.deleteTx(txHash) + if deletedTx != nil { + if isReady { + log.Debugf("tx %s deleted from TxSortedList", deletedTx.Hash) + w.txSortedList.delete(deletedTx) + } + } else { + log.Warnf("tx %s not found in addrQueue %s", txHash, addr) + } + + return deletedTx + } else { + log.Warnf("addrQueue %s not found", addr) + + return nil + } +} + // DeleteTx deletes a regular tx from the addrQueue func (w *Worker) DeleteTx(txHash common.Hash, addr common.Address) { w.workerMutex.Lock() @@ -206,16 +237,7 @@ func (w *Worker) DeleteTx(txHash common.Hash, addr common.Address) { w.resetWipTx(txHash) - addrQueue, found := w.pool[addr.String()] - if found { - deletedReadyTx := addrQueue.deleteTx(txHash) - if deletedReadyTx != nil { - log.Debugf("tx %s deleted from TxSortedList", deletedReadyTx.Hash.String()) - w.txSortedList.delete(deletedReadyTx) - } - } else { - log.Warnf("addrQueue %s not found", addr.String()) - } + w.deleteTx(txHash, addr) } // DeleteForcedTx deletes a forced tx from the addrQueue @@ -257,55 +279,153 @@ func (w *Worker) UpdateTxZKCounters(txHash common.Hash, addr common.Address, use } } -// AddPendingTxToStore adds a tx to the addrQueue list of pending txs to store in the DB (trusted state) -func (w *Worker) AddPendingTxToStore(txHash common.Hash, addr common.Address) { +// MoveTxPendingToStore moves a tx to pending to store list +func 
(w *Worker) MoveTxPendingToStore(txHash common.Hash, addr common.Address) { + // TODO: Add test for this function + w.workerMutex.Lock() defer w.workerMutex.Unlock() - addrQueue, found := w.pool[addr.String()] + // Delete from worker pool and addrQueue + deletedTx := w.deleteTx(txHash, addr) - if found { + // Add tx to pending to store list in worker + if deletedTx != nil { + w.pendingToStore = append(w.pendingToStore, deletedTx) + log.Debugf("tx %s add to pendingToStore, order: %d", deletedTx.Hash, len(w.pendingToStore)) + } else { + log.Warnf("tx %s not found when moving it to pending to store, address: %s", txHash, addr) + } + + // Add tx to pending to store list in addrQueue + if addrQueue, found := w.pool[addr.String()]; found { addrQueue.addPendingTxToStore(txHash) } else { - log.Warnf("addrQueue %s not found", addr.String()) + log.Warnf("addrQueue %s not found when moving tx %s to pending to store", addr, txHash) } } +// RestoreTxsPendingToStore restores the txs pending to store and move them to the worker pool to be processed again +func (w *Worker) RestoreTxsPendingToStore(ctx context.Context) ([]*TxTracker, []*TxTracker) { + // TODO: Add test for this function + // TODO: We need to process restored txs in the same order we processed initially + + w.workerMutex.Lock() + + addrList := make(map[common.Address]struct{}) + txsList := []*TxTracker{} + w.reorgedTxs = []*TxTracker{} + + // Add txs pending to store to the list that will include all the txs to reprocess again + // Add txs to the reorgedTxs list to get them in the order which they were processed before the L2 block reorg + // Get also the addresses of theses txs since we will need to recreate them + for _, txToStore := range w.pendingToStore { + txsList = append(txsList, txToStore) + w.reorgedTxs = append(w.reorgedTxs, txToStore) + addrList[txToStore.From] = struct{}{} + } + + // Add txs from addrQueues that will be recreated and delete addrQueues from the pool list + for addr := range addrList { + addrQueue, found := w.pool[addr.String()] + if found { + txsList = append(txsList, addrQueue.getTransactions()...) 
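
MoveTxPendingToStore above removes the tx from its addrQueue and appends it to the worker's pendingToStore list; DeleteTxPendingToStore (later in this diff) drops it once the store succeeds, and RestoreTxsPendingToStore replays the list, oldest first, when an L2 block reorg forces reprocessing. The sketch below models only that lifecycle on a simplified, order-preserving list of tx hashes; the type and method names are illustrative, not the Worker's API.

```go
package main

import "fmt"

// pendingStore is an illustrative, order-preserving model of the Worker's
// pendingToStore list: txs enter when they are handed over to be stored,
// leave when the store succeeds, and are replayed in their original order
// after an L2 block reorg.
type pendingStore struct {
	order []string // tx hashes, oldest first
}

// move records a tx that has been taken out of its addrQueue to be stored.
func (p *pendingStore) move(hash string) {
	p.order = append(p.order, hash)
}

// remove drops a successfully stored tx while keeping the relative order of
// the remaining ones.
func (p *pendingStore) remove(hash string) {
	for i, h := range p.order {
		if h == hash {
			p.order = append(p.order[:i], p.order[i+1:]...)
			return
		}
	}
}

// restore drains the list for reprocessing, oldest first (the reorg path).
func (p *pendingStore) restore() []string {
	out := p.order
	p.order = nil
	return out
}

func main() {
	var p pendingStore
	p.move("tx1")
	p.move("tx2")
	p.move("tx3")

	p.remove("tx2") // tx2 reached the state DB, so it no longer needs replaying

	fmt.Println(p.restore()) // reorg: [tx1 tx3] come back in their original order
}
```
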
+ if addrQueue.readyTx != nil { + // Delete readyTx from the txSortedList + w.txSortedList.delete(addrQueue.readyTx) + } + // Delete the addrQueue to recreate it later + delete(w.pool, addr.String()) + } + } + + // Clear pendingToStore list + w.pendingToStore = []*TxTracker{} + // Clear wip tx + w.wipTx = nil + + for _, tx := range w.reorgedTxs { + log.Infof("reorged tx %s, nonce %d, from: %s", tx.Hash, tx.Nonce, tx.From) + } + + replacedTxs := []*TxTracker{} + droppedTxs := []*TxTracker{} + // Add again in the worker the txs to restore (this will recreate addrQueues) + for _, restoredTx := range txsList { + replacedTx, dropReason := w.addTxTracker(ctx, restoredTx, nil) + if dropReason != nil { + droppedTxs = append(droppedTxs, restoredTx) + } + if replacedTx != nil { + droppedTxs = append(replacedTxs, restoredTx) + } + } + + w.workerMutex.Unlock() + + // In this scenario we shouldn't have dropped or replaced txs but we return it just in case + return droppedTxs, replacedTxs +} + // AddForcedTx adds a forced tx to the addrQueue func (w *Worker) AddForcedTx(txHash common.Hash, addr common.Address) { w.workerMutex.Lock() defer w.workerMutex.Unlock() - addrQueue, found := w.pool[addr.String()] - - if found { + if addrQueue, found := w.pool[addr.String()]; found { addrQueue.addForcedTx(txHash) } else { log.Warnf("addrQueue %s not found", addr.String()) } } -// DeletePendingTxToStore delete a tx from the addrQueue list of pending txs to store in the DB (trusted state) -func (w *Worker) DeletePendingTxToStore(txHash common.Hash, addr common.Address) { +// DeleteTxPendingToStore delete a tx from the addrQueue list of pending txs to store in the DB (trusted state) +func (w *Worker) DeleteTxPendingToStore(txHash common.Hash, addr common.Address) { w.workerMutex.Lock() defer w.workerMutex.Unlock() - addrQueue, found := w.pool[addr.String()] + // Delete tx from pending to store list in worker + found := false + for i, txToStore := range w.pendingToStore { + if txToStore.Hash == txHash { + found = true + w.pendingToStore = append(w.pendingToStore[:i], w.pendingToStore[i+1:]...) 
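
RestoreTxsPendingToStore re-adds each restored tx through addTxTracker(ctx, restoredTx, nil): passing a nil mutex tells the shared helper that the worker lock is already held, which is what the mutexLock/mutexUnlock wrappers added at the end of worker.go make possible. A minimal sketch of that optional-lock pattern, with the two helpers copied from the diff and the surrounding type invented for illustration:

```go
package main

import (
	"fmt"
	"sync"
)

// mutexLock and mutexUnlock mirror the helpers added at the end of
// sequencer/worker.go: they no-op on a nil mutex, so one code path can run
// either standalone (taking the lock) or from a caller that already holds it.
func mutexLock(m *sync.Mutex) {
	if m != nil {
		m.Lock()
	}
}

func mutexUnlock(m *sync.Mutex) {
	if m != nil {
		m.Unlock()
	}
}

// registry is an illustrative stand-in for the Worker; addLocked plays the
// role of addTxTracker(ctx, tx, mutex).
type registry struct {
	mu    sync.Mutex
	items []string
}

// Add is the public entry point and locks on behalf of the caller.
func (r *registry) Add(item string) {
	r.addLocked(item, &r.mu)
}

// addLocked is shared by Add and by internal callers (such as the restore
// path) that already hold r.mu and therefore pass nil to skip relocking.
func (r *registry) addLocked(item string, mu *sync.Mutex) {
	mutexLock(mu)
	defer mutexUnlock(mu)
	r.items = append(r.items, item)
}

// restore re-adds items while holding the lock for the whole operation,
// the same way RestoreTxsPendingToStore re-adds txs with a nil mutex.
func (r *registry) restore(items []string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	for _, it := range items {
		r.addLocked(it, nil) // lock already held; passing &r.mu would self-deadlock
	}
}

func main() {
	r := &registry{}
	r.Add("tx1")
	r.restore([]string{"tx2", "tx3"})
	fmt.Println(r.items) // [tx1 tx2 tx3]
}
```
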
+ } + } + if !found { + log.Warnf("tx %s not found when deleting it from worker pool", txHash) + } - if found { + // Delete tx from pending to store list in addrQueue + if addrQueue, found := w.pool[addr.String()]; found { addrQueue.deletePendingTxToStore(txHash) } else { - log.Warnf("addrQueue %s not found", addr.String()) + log.Warnf("addrQueue %s not found when deleting pending to store tx %s", addr, txHash) } } // GetBestFittingTx gets the most efficient tx that fits in the available batch resources -func (w *Worker) GetBestFittingTx(resources state.BatchResources) (*TxTracker, error) { +func (w *Worker) GetBestFittingTx(remainingResources state.BatchResources, highReservedCounters state.ZKCounters) (*TxTracker, error) { w.workerMutex.Lock() defer w.workerMutex.Unlock() w.wipTx = nil + // If we are processing a L2 block reorg we return the next tx in the reorg list + for len(w.reorgedTxs) > 0 { + reorgedTx := w.reorgedTxs[0] + w.reorgedTxs = w.reorgedTxs[1:] + if addrQueue, found := w.pool[reorgedTx.FromStr]; found { + if addrQueue.readyTx != nil && addrQueue.readyTx.Hash == reorgedTx.Hash { + return reorgedTx, nil + } else { + log.Warnf("reorged tx %s is not the ready tx for addrQueue %s, this shouldn't happen", reorgedTx.Hash, reorgedTx.From) + } + } else { + log.Warnf("addrQueue %s for reorged tx %s not found, this shouldn't happen", reorgedTx.From, reorgedTx.Hash) + } + } + if w.txSortedList.len() == 0 { return nil, ErrTransactionsListEmpty } @@ -334,8 +454,9 @@ func (w *Worker) GetBestFittingTx(resources state.BatchResources) (*TxTracker, e foundMutex.RUnlock() txCandidate := w.txSortedList.getByIndex(i) - overflow, _ := bresources.Sub(state.BatchResources{ZKCounters: txCandidate.ReservedZKCounters, Bytes: txCandidate.Bytes}) - if overflow { + needed, _ := getNeededZKCounters(highReservedCounters, txCandidate.UsedZKCounters, txCandidate.ReservedZKCounters) + fits, _ := bresources.Fits(state.BatchResources{ZKCounters: needed, Bytes: txCandidate.Bytes}) + if !fits { // We don't add this Tx continue } @@ -349,7 +470,7 @@ func (w *Worker) GetBestFittingTx(resources state.BatchResources) (*TxTracker, e return } - }(i, resources) + }(i, remainingResources) } wg.Wait() @@ -402,3 +523,15 @@ func (w *Worker) resetWipTx(txHash common.Hash) { w.wipTx = nil } } + +func mutexLock(mutex *sync.Mutex) { + if mutex != nil { + mutex.Lock() + } +} + +func mutexUnlock(mutex *sync.Mutex) { + if mutex != nil { + mutex.Unlock() + } +} diff --git a/sequencer/worker_test.go b/sequencer/worker_test.go index a86d7a2f3f..0e2375ad37 100644 --- a/sequencer/worker_test.go +++ b/sequencer/worker_test.go @@ -258,7 +258,7 @@ func TestWorkerGetBestTx(t *testing.T) { ct := 0 for { - tx, _ := worker.GetBestFittingTx(rc) + tx, _ := worker.GetBestFittingTx(rc, state.ZKCounters{}) if tx != nil { if ct >= len(expectedGetBestTx) { t.Fatalf("Error getting more best tx than expected. 
Expected=%d, Actual=%d", len(expectedGetBestTx), ct+1) diff --git a/state/batch.go b/state/batch.go index 7cf10ebeab..f5325a08c7 100644 --- a/state/batch.go +++ b/state/batch.go @@ -42,11 +42,12 @@ type Batch struct { AccInputHash common.Hash // Timestamp (<=incaberry) -> batch time // (>incaberry) -> minTimestamp used in batch creation, real timestamp is in virtual_batch.batch_timestamp - Timestamp time.Time - Transactions []types.Transaction - GlobalExitRoot common.Hash - ForcedBatchNum *uint64 - Resources BatchResources + Timestamp time.Time + Transactions []types.Transaction + GlobalExitRoot common.Hash + ForcedBatchNum *uint64 + Resources BatchResources + HighReservedZKCounters ZKCounters // WIP: if WIP == true is a openBatch WIP bool } @@ -83,6 +84,8 @@ const ( MaxDeltaTimestampClosingReason ClosingReason = "Max delta timestamp" // NoTxFitsClosingReason is the closing reason used when any of the txs in the pool (worker) fits in the remaining resources of the batch NoTxFitsClosingReason ClosingReason = "No transaction fits" + // L2BlockReorgClonsingReason is the closing reason used when we have a L2 block reorg (unexpected error, like OOC, when processing L2 block) + L2BlockReorgClonsingReason ClosingReason = "L2 block reorg" // Reason due Synchronizer // ------------------------------------------------------------------------------------------ @@ -109,9 +112,10 @@ type ProcessingReceipt struct { GlobalExitRoot common.Hash AccInputHash common.Hash // Txs []types.Transaction - BatchL2Data []byte - ClosingReason ClosingReason - BatchResources BatchResources + BatchL2Data []byte + ClosingReason ClosingReason + BatchResources BatchResources + HighReservedZKCounters ZKCounters } // VerifiedBatch represents a VerifiedBatch diff --git a/state/batchV2.go b/state/batchV2.go index 31c65c7783..6947f02160 100644 --- a/state/batchV2.go +++ b/state/batchV2.go @@ -37,7 +37,7 @@ type ProcessingContextV2 struct { } // ProcessBatchV2 processes a batch for forkID >= ETROG -func (s *State) ProcessBatchV2(ctx context.Context, request ProcessRequest, updateMerkleTree bool) (*ProcessBatchResponse, error) { +func (s *State) ProcessBatchV2(ctx context.Context, request ProcessRequest, updateMerkleTree bool) (*ProcessBatchResponse, string, error) { updateMT := uint32(cFalse) if updateMerkleTree { updateMT = cTrue @@ -84,16 +84,16 @@ func (s *State) ProcessBatchV2(ctx context.Context, request ProcessRequest, upda res, err := s.sendBatchRequestToExecutorV2(ctx, processBatchRequest, request.Caller) if err != nil { - return nil, err + return nil, "", err } var result *ProcessBatchResponse result, err = s.convertToProcessBatchResponseV2(res) if err != nil { - return nil, err + return nil, "", err } - return result, nil + return result, processBatchRequest.ContextId, nil } // ExecuteBatchV2 is used by the synchronizer to reprocess batches to compare generated state root vs stored one diff --git a/state/pgstatestorage/batch.go b/state/pgstatestorage/batch.go index 8b242dce66..829dd56b40 100644 --- a/state/pgstatestorage/batch.go +++ b/state/pgstatestorage/batch.go @@ -83,7 +83,7 @@ func (p *PostgresStorage) GetVerifiedBatch(ctx context.Context, batchNumber uint // GetLastNBatches returns the last numBatches batches. 
func (p *PostgresStorage) GetLastNBatches(ctx context.Context, numBatches uint, dbTx pgx.Tx) ([]*state.Batch, error) { - const getLastNBatchesSQL = "SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, wip from state.batch ORDER BY batch_num DESC LIMIT $1" + const getLastNBatchesSQL = "SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, high_reserved_counters, wip from state.batch ORDER BY batch_num DESC LIMIT $1" e := p.getExecQuerier(dbTx) rows, err := e.Query(ctx, getLastNBatchesSQL, numBatches) @@ -256,7 +256,7 @@ func (p *PostgresStorage) SetInitSyncBatch(ctx context.Context, batchNumber uint // GetBatchByNumber returns the batch with the given number. func (p *PostgresStorage) GetBatchByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.Batch, error) { const getBatchByNumberSQL = ` - SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, wip + SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, high_reserved_counters, wip FROM state.batch WHERE batch_num = $1` @@ -276,7 +276,7 @@ func (p *PostgresStorage) GetBatchByNumber(ctx context.Context, batchNumber uint // GetBatchByTxHash returns the batch including the given tx func (p *PostgresStorage) GetBatchByTxHash(ctx context.Context, transactionHash common.Hash, dbTx pgx.Tx) (*state.Batch, error) { const getBatchByTxHashSQL = ` - SELECT b.batch_num, b.global_exit_root, b.local_exit_root, b.acc_input_hash, b.state_root, b.timestamp, b.coinbase, b.raw_txs_data, b.forced_batch_num, b.batch_resources, b.wip + SELECT b.batch_num, b.global_exit_root, b.local_exit_root, b.acc_input_hash, b.state_root, b.timestamp, b.coinbase, b.raw_txs_data, b.forced_batch_num, b.batch_resources, b.high_reserved_counters, b.wip FROM state.transaction t, state.batch b, state.l2block l WHERE t.hash = $1 AND l.block_num = t.l2_block_num AND b.batch_num = l.batch_num` @@ -295,7 +295,7 @@ func (p *PostgresStorage) GetBatchByTxHash(ctx context.Context, transactionHash // GetBatchByL2BlockNumber returns the batch related to the l2 block accordingly to the provided l2 block number. 
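
The repetitive SQL edits here all enforce one invariant: every query that feeds scanBatch now selects high_reserved_counters between batch_resources and wip, and the Scan destinations (in the next hunks) are extended in the same position, because positional scanning silently misassigns values if the two lists drift apart. A generic sketch of that pairing using database/sql; the connection string is a placeholder and the node itself uses pgx rather than lib/pq.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // illustrative driver; the node itself uses pgx
)

func main() {
	// Placeholder connection string; this sketch only illustrates the
	// column/Scan pairing, not the node's actual DB setup.
	db, err := sql.Open("postgres", "postgres://localhost/example?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Columns mirror the updated GetLastNBatches-style queries: the new
	// high_reserved_counters column sits between batch_resources and wip.
	const q = `SELECT batch_num, batch_resources, high_reserved_counters, wip
	             FROM state.batch ORDER BY batch_num DESC LIMIT 1`

	var (
		batchNum             int64
		resources            []byte
		highReservedCounters []byte // nullable: rows older than the migration hold NULL
		wip                  bool
	)

	// Scan targets must line up 1:1, and in the same order, with the
	// SELECTed columns; adding a column means touching both lists together.
	if err := db.QueryRow(q).Scan(&batchNum, &resources, &highReservedCounters, &wip); err != nil {
		log.Fatal(err)
	}

	fmt.Println("batch:", batchNum, "wip:", wip, "has high_reserved_counters:", highReservedCounters != nil)
}
```
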
func (p *PostgresStorage) GetBatchByL2BlockNumber(ctx context.Context, l2BlockNumber uint64, dbTx pgx.Tx) (*state.Batch, error) { const getBatchByL2BlockNumberSQL = ` - SELECT bt.batch_num, bt.global_exit_root, bt.local_exit_root, bt.acc_input_hash, bt.state_root, bt.timestamp, bt.coinbase, bt.raw_txs_data, bt.forced_batch_num, bt.batch_resources, bt.wip + SELECT bt.batch_num, bt.global_exit_root, bt.local_exit_root, bt.acc_input_hash, bt.state_root, bt.timestamp, bt.coinbase, bt.raw_txs_data, bt.forced_batch_num, bt.batch_resources, bt.high_reserved_counters, bt.wip FROM state.batch bt INNER JOIN state.l2block bl ON bt.batch_num = bl.batch_num @@ -328,6 +328,7 @@ func (p *PostgresStorage) GetVirtualBatchByNumber(ctx context.Context, batchNumb raw_txs_data, forced_batch_num, batch_resources, + high_reserved_counters, wip FROM state.batch @@ -385,13 +386,14 @@ func (p *PostgresStorage) IsSequencingTXSynced(ctx context.Context, transactionH func scanBatch(row pgx.Row) (state.Batch, error) { batch := state.Batch{} var ( - gerStr string - lerStr *string - aihStr *string - stateStr *string - coinbaseStr string - resourcesData []byte - wip bool + gerStr string + lerStr *string + aihStr *string + stateStr *string + coinbaseStr string + resourcesData []byte + highReservedCounters []byte + wip bool ) err := row.Scan( &batch.BatchNumber, @@ -404,6 +406,7 @@ func scanBatch(row pgx.Row) (state.Batch, error) { &batch.BatchL2Data, &batch.ForcedBatchNum, &resourcesData, + &highReservedCounters, &wip, ) if err != nil { @@ -426,6 +429,14 @@ func scanBatch(row pgx.Row) (state.Batch, error) { return batch, err } } + + if highReservedCounters != nil { + err = json.Unmarshal(highReservedCounters, &batch.HighReservedZKCounters) + if err != nil { + return batch, err + } + } + batch.WIP = wip batch.Coinbase = common.HexToAddress(coinbaseStr) @@ -662,7 +673,7 @@ func (p *PostgresStorage) CloseWIPBatchInStorage(ctx context.Context, receipt st // GetWIPBatchInStorage returns the wip batch in the state func (p *PostgresStorage) GetWIPBatchInStorage(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.Batch, error) { const getWIPBatchByNumberSQL = ` - SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, wip + SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, high_reserved_counters, wip FROM state.batch WHERE batch_num = $1 AND wip = TRUE` @@ -777,6 +788,7 @@ func (p *PostgresStorage) GetVirtualBatchToProve(ctx context.Context, lastVerfie b.raw_txs_data, b.forced_batch_num, b.batch_resources, + b.high_reserved_counters, b.wip FROM state.batch b, @@ -841,7 +853,7 @@ func (p *PostgresStorage) GetSequences(ctx context.Context, lastVerifiedBatchNum // GetLastClosedBatch returns the latest closed batch func (p *PostgresStorage) GetLastClosedBatch(ctx context.Context, dbTx pgx.Tx) (*state.Batch, error) { const getLastClosedBatchSQL = ` - SELECT bt.batch_num, bt.global_exit_root, bt.local_exit_root, bt.acc_input_hash, bt.state_root, bt.timestamp, bt.coinbase, bt.raw_txs_data, bt.forced_batch_num, bt.batch_resources, bt.wip + SELECT bt.batch_num, bt.global_exit_root, bt.local_exit_root, bt.acc_input_hash, bt.state_root, bt.timestamp, bt.coinbase, bt.raw_txs_data, bt.forced_batch_num, bt.batch_resources, bt.high_reserved_counters, bt.wip FROM state.batch bt WHERE wip = FALSE ORDER BY bt.batch_num DESC @@ -890,14 +902,20 @@ 
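
scanBatch above only unmarshals high_reserved_counters when the raw bytes are non-nil, since batches written before this migration hold NULL in the new JSONB column, while UpdateWIPBatch in the next hunk marshals receipt.HighReservedZKCounters to JSON before writing it back. The round-trip in isolation, with a trimmed-down counters struct whose field names are illustrative rather than the full state.ZKCounters:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// zkCounters is a trimmed-down, illustrative stand-in for state.ZKCounters;
// only the JSON round-trip matters here, not the exact field set.
type zkCounters struct {
	GasUsed uint64
	Steps   uint64
}

// toColumn is what the UPDATE path does: marshal the struct so it can be
// passed as a string parameter for the JSONB high_reserved_counters column.
func toColumn(c zkCounters) (string, error) {
	b, err := json.Marshal(c)
	return string(b), err
}

// fromColumn is what scanBatch does: the raw column value may be nil for
// batches created before the migration, in which case the zero value is kept.
func fromColumn(raw []byte) (zkCounters, error) {
	var c zkCounters
	if raw == nil {
		return c, nil // pre-migration row: leave the counters zeroed
	}
	err := json.Unmarshal(raw, &c)
	return c, err
}

func main() {
	col, _ := toColumn(zkCounters{GasUsed: 150000, Steps: 1890})
	fmt.Println(col) // {"GasUsed":150000,"Steps":1890}

	restored, _ := fromColumn([]byte(col))
	fmt.Println(restored)

	empty, _ := fromColumn(nil) // NULL column
	fmt.Println(empty)
}
```
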
func (p *PostgresStorage) UpdateBatchL2Data(ctx context.Context, batchNumber uin // UpdateWIPBatch updates the data in a batch func (p *PostgresStorage) UpdateWIPBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error { - const updateL2DataSQL = "UPDATE state.batch SET raw_txs_data = $2, global_exit_root = $3, state_root = $4, local_exit_root = $5, batch_resources = $6 WHERE batch_num = $1" + const updateL2DataSQL = "UPDATE state.batch SET raw_txs_data = $2, global_exit_root = $3, state_root = $4, local_exit_root = $5, batch_resources = $6, high_reserved_counters = $7 WHERE batch_num = $1" e := p.getExecQuerier(dbTx) batchResourcesJsonBytes, err := json.Marshal(receipt.BatchResources) if err != nil { return err } - _, err = e.Exec(ctx, updateL2DataSQL, receipt.BatchNumber, receipt.BatchL2Data, receipt.GlobalExitRoot.String(), receipt.StateRoot.String(), receipt.LocalExitRoot.String(), string(batchResourcesJsonBytes)) + + highReservedCounters, err := json.Marshal(receipt.HighReservedZKCounters) + if err != nil { + return err + } + + _, err = e.Exec(ctx, updateL2DataSQL, receipt.BatchNumber, receipt.BatchL2Data, receipt.GlobalExitRoot.String(), receipt.StateRoot.String(), receipt.LocalExitRoot.String(), string(batchResourcesJsonBytes), string(highReservedCounters)) return err } @@ -1030,7 +1048,7 @@ func (p *PostgresStorage) GetLatestBatchGlobalExitRoot(ctx context.Context, dbTx // GetNotCheckedBatches returns the batches that are closed but not checked func (p *PostgresStorage) GetNotCheckedBatches(ctx context.Context, dbTx pgx.Tx) ([]*state.Batch, error) { const getBatchesNotCheckedSQL = ` - SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, wip + SELECT batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, batch_resources, high_reserved_counters, wip from state.batch WHERE wip IS FALSE AND checked IS FALSE ORDER BY batch_num ASC` e := p.getExecQuerier(dbTx) diff --git a/state/test/forkid_etrog/etrog_test.go b/state/test/forkid_etrog/etrog_test.go index cea81fa86b..67e5beb300 100644 --- a/state/test/forkid_etrog/etrog_test.go +++ b/state/test/forkid_etrog/etrog_test.go @@ -127,7 +127,7 @@ func TestStateTransition(t *testing.T) { SkipVerifyL1InfoRoot_V2: testCase.L1InfoTree.SkipVerifyL1InfoRoot, } - processResponse, _ := testState.ProcessBatchV2(ctx, processRequest, true) + processResponse, _, _ := testState.ProcessBatchV2(ctx, processRequest, true) require.Nil(t, processResponse.ExecutorError) require.Equal(t, testCase.ExpectedNewStateRoot, processResponse.NewStateRoot.String()) } diff --git a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go index e9c5f45c9e..851b6105f7 100644 --- a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go +++ b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go @@ -2702,7 +2702,7 @@ func (_c *StateFullInterface_ProcessBatch_Call) RunAndReturn(run func(context.Co } // ProcessBatchV2 provides a mock function with given fields: ctx, request, updateMerkleTree -func (_m *StateFullInterface) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) { +func (_m *StateFullInterface) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, 
string, error) { ret := _m.Called(ctx, request, updateMerkleTree) if len(ret) == 0 { @@ -2710,8 +2710,9 @@ func (_m *StateFullInterface) ProcessBatchV2(ctx context.Context, request state. } var r0 *state.ProcessBatchResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, error)); ok { + var r1 string + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, string, error)); ok { return rf(ctx, request, updateMerkleTree) } if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) *state.ProcessBatchResponse); ok { @@ -2722,13 +2723,19 @@ func (_m *StateFullInterface) ProcessBatchV2(ctx context.Context, request state. } } - if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) string); ok { r1 = rf(ctx, request, updateMerkleTree) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(string) } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context, state.ProcessRequest, bool) error); ok { + r2 = rf(ctx, request, updateMerkleTree) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // StateFullInterface_ProcessBatchV2_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBatchV2' @@ -2751,12 +2758,12 @@ func (_c *StateFullInterface_ProcessBatchV2_Call) Run(run func(ctx context.Conte return _c } -func (_c *StateFullInterface_ProcessBatchV2_Call) Return(_a0 *state.ProcessBatchResponse, _a1 error) *StateFullInterface_ProcessBatchV2_Call { - _c.Call.Return(_a0, _a1) +func (_c *StateFullInterface_ProcessBatchV2_Call) Return(_a0 *state.ProcessBatchResponse, _a1 string, _a2 error) *StateFullInterface_ProcessBatchV2_Call { + _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *StateFullInterface_ProcessBatchV2_Call) RunAndReturn(run func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, error)) *StateFullInterface_ProcessBatchV2_Call { +func (_c *StateFullInterface_ProcessBatchV2_Call) RunAndReturn(run func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, string, error)) *StateFullInterface_ProcessBatchV2_Call { _c.Call.Return(run) return _c } diff --git a/synchronizer/common/syncinterfaces/state.go b/synchronizer/common/syncinterfaces/state.go index 65d98bc264..d5cc07fad8 100644 --- a/synchronizer/common/syncinterfaces/state.go +++ b/synchronizer/common/syncinterfaces/state.go @@ -51,7 +51,7 @@ type StateFullInterface interface { OpenBatch(ctx context.Context, processingContext state.ProcessingContext, dbTx pgx.Tx) error CloseBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error ProcessBatch(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) - ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) + ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, string, error) StoreTransaction(ctx context.Context, batchNumber uint64, processedTx *state.ProcessTransactionResponse, coinbase common.Address, timestamp uint64, egpLog *state.EffectiveGasPriceLog, globalExitRoot, blockInfoRoot common.Hash, dbTx pgx.Tx) (*state.L2Header, error) GetStateRootByBatchNumber(ctx context.Context, batchNum uint64, dbTx pgx.Tx) (common.Hash, 
error) ExecuteBatch(ctx context.Context, batch state.Batch, updateMerkleTree bool, dbTx pgx.Tx) (*executor.ProcessBatchResponse, error) diff --git a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go index 7c89494441..fd22c2b1b5 100644 --- a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go +++ b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go @@ -36,7 +36,7 @@ type StateInterface interface { UpdateWIPBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error ResetTrustedState(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error OpenBatch(ctx context.Context, processingContext state.ProcessingContext, dbTx pgx.Tx) error - ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) + ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, string, error) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *state.ProcessBlockResponse, txsEGPLog []*state.EffectiveGasPriceLog, dbTx pgx.Tx) error GetL1InfoTreeDataFromBatchL2Data(ctx context.Context, batchL2Data []byte, dbTx pgx.Tx) (map[uint32]state.L1DataV2, common.Hash, common.Hash, error) GetLastVirtualBatchNum(ctx context.Context, dbTx pgx.Tx) (uint64, error) @@ -376,7 +376,7 @@ func (b *SyncTrustedBatchExecutorForEtrog) processAndStoreTxs(ctx context.Contex if request.OldStateRoot == state.ZeroHash { log.Warnf("%s Processing batch with oldStateRoot == zero....", debugPrefix) } - processBatchResp, err := b.state.ProcessBatchV2(ctx, request, true) + processBatchResp, _, err := b.state.ProcessBatchV2(ctx, request, true) if err != nil { log.Errorf("%s error processing sequencer batch for batch: %v error:%v ", debugPrefix, request.BatchNumber, err) return nil, err diff --git a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go index 97a7125b96..98f9d28fea 100644 --- a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go +++ b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync_test.go @@ -76,7 +76,7 @@ func TestIncrementalProcessUpdateBatchL2DataOnCache(t *testing.T) { processBatchResp := &state.ProcessBatchResponse{ NewStateRoot: expectedStateRoot, } - stateMock.EXPECT().ProcessBatchV2(ctx, mock.Anything, true).Return(processBatchResp, nil).Once() + stateMock.EXPECT().ProcessBatchV2(ctx, mock.Anything, true).Return(processBatchResp, "", nil).Once() syncMock.EXPECT().PendingFlushID(mock.Anything, mock.Anything).Once() syncMock.EXPECT().CheckFlushID(mock.Anything).Return(nil).Maybe() @@ -206,7 +206,7 @@ func TestNothingProcessDoesntMatchBatchReprocess(t *testing.T) { processBatchResp := &state.ProcessBatchResponse{ NewStateRoot: data.TrustedBatch.StateRoot, } - testData.stateMock.EXPECT().ProcessBatchV2(testData.ctx, mock.Anything, true).Return(processBatchResp, nil).Once() + testData.stateMock.EXPECT().ProcessBatchV2(testData.ctx, mock.Anything, true).Return(processBatchResp, "", nil).Once() testData.stateMock.EXPECT().GetBatchByNumber(testData.ctx, data.BatchNumber, mock.Anything).Return(&state.Batch{}, nil).Once() _, err := testData.sut.NothingProcess(testData.ctx, &data, nil) require.NoError(t, err) diff --git a/synchronizer/l2_sync/l2_sync_etrog/mocks/state_interface.go b/synchronizer/l2_sync/l2_sync_etrog/mocks/state_interface.go index 
5101bb4b6a..43e84ffba5 100644 --- a/synchronizer/l2_sync/l2_sync_etrog/mocks/state_interface.go +++ b/synchronizer/l2_sync/l2_sync_etrog/mocks/state_interface.go @@ -423,7 +423,7 @@ func (_c *StateInterface_OpenBatch_Call) RunAndReturn(run func(context.Context, } // ProcessBatchV2 provides a mock function with given fields: ctx, request, updateMerkleTree -func (_m *StateInterface) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) { +func (_m *StateInterface) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, string, error) { ret := _m.Called(ctx, request, updateMerkleTree) if len(ret) == 0 { @@ -431,8 +431,9 @@ func (_m *StateInterface) ProcessBatchV2(ctx context.Context, request state.Proc } var r0 *state.ProcessBatchResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, error)); ok { + var r1 string + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, string, error)); ok { return rf(ctx, request, updateMerkleTree) } if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) *state.ProcessBatchResponse); ok { @@ -443,13 +444,19 @@ func (_m *StateInterface) ProcessBatchV2(ctx context.Context, request state.Proc } } - if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) string); ok { r1 = rf(ctx, request, updateMerkleTree) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(string) } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context, state.ProcessRequest, bool) error); ok { + r2 = rf(ctx, request, updateMerkleTree) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // StateInterface_ProcessBatchV2_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBatchV2' @@ -472,12 +479,12 @@ func (_c *StateInterface_ProcessBatchV2_Call) Run(run func(ctx context.Context, return _c } -func (_c *StateInterface_ProcessBatchV2_Call) Return(_a0 *state.ProcessBatchResponse, _a1 error) *StateInterface_ProcessBatchV2_Call { - _c.Call.Return(_a0, _a1) +func (_c *StateInterface_ProcessBatchV2_Call) Return(_a0 *state.ProcessBatchResponse, _a1 string, _a2 error) *StateInterface_ProcessBatchV2_Call { + _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *StateInterface_ProcessBatchV2_Call) RunAndReturn(run func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, error)) *StateInterface_ProcessBatchV2_Call { +func (_c *StateInterface_ProcessBatchV2_Call) RunAndReturn(run func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, string, error)) *StateInterface_ProcessBatchV2_Call { _c.Call.Return(run) return _c } diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index 8a3b196e3c..b330d63605 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -894,7 +894,7 @@ func expectedCallsForsyncTrustedState(t *testing.T, m *mocks, sync *ClientSynchr if etrogMode { m.State.EXPECT().GetL1InfoTreeDataFromBatchL2Data(mock.Anything, mock.Anything, mock.Anything).Return(map[uint32]state.L1DataV2{}, common.Hash{}, common.Hash{}, nil).Times(1) m.State.EXPECT().ProcessBatchV2(mock.Anything, mock.Anything, mock.Anything). 
- Return(&processedBatch, nil).Times(1) + Return(&processedBatch, "", nil).Times(1) m.State.EXPECT().StoreL2Block(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return(nil).Times(1) m.State.EXPECT().UpdateWIPBatch(mock.Anything, mock.Anything, mock.Anything). diff --git a/test/config/debug.node.config.toml b/test/config/debug.node.config.toml index b80bed0b82..4ed05c7b1f 100644 --- a/test/config/debug.node.config.toml +++ b/test/config/debug.node.config.toml @@ -114,9 +114,10 @@ StateConsistencyCheckInterval = "5s" BatchMaxDeltaTimestamp = "20s" L2BlockMaxDeltaTimestamp = "4s" ResourceExhaustedMarginPct = 10 + StateRootSyncInterval = "120s" HaltOnBatchNumber = 0 SequentialBatchSanityCheck = false - SequentialProcessL2Block = true + SequentialProcessL2Block = false [Sequencer.Finalizer.Metrics] Interval = "60m" EnableLog = true diff --git a/test/config/test.node.config.toml b/test/config/test.node.config.toml index 0665c104a3..19dae42726 100644 --- a/test/config/test.node.config.toml +++ b/test/config/test.node.config.toml @@ -37,8 +37,8 @@ MaxTxDataBytesSize=100000 DefaultMinGasPriceAllowed = 1000000000 MinAllowedGasPriceInterval = "5m" PollMinAllowedGasPriceInterval = "15s" -AccountQueue = 64 -GlobalQueue = 1024 +AccountQueue = 0 +GlobalQueue = 0 [Pool.EffectiveGasPrice] Enabled = false L1GasPriceFactor = 0.25 @@ -115,9 +115,10 @@ StateConsistencyCheckInterval = "5s" BatchMaxDeltaTimestamp = "20s" L2BlockMaxDeltaTimestamp = "4s" ResourceExhaustedMarginPct = 10 + StateRootSyncInterval = "60s" HaltOnBatchNumber = 0 SequentialBatchSanityCheck = false - SequentialProcessL2Block = true + SequentialProcessL2Block = false [Sequencer.Finalizer.Metrics] Interval = "60m" EnableLog = true diff --git a/test/docker-compose.yml b/test/docker-compose.yml index 24b6d18241..ac76926f01 100644 --- a/test/docker-compose.yml +++ b/test/docker-compose.yml @@ -1,4 +1,3 @@ -version: "3.5" networks: default: name: zkevm @@ -26,7 +25,7 @@ services: volumes: - ./config/telegraf.conf:/etc/telegraf/telegraf.conf:ro - /var/run/docker.sock:/var/run/docker.sock:ro - user: telegraf:${DOCKERGID} + user: telegraf:${DOCKERGID:-} environment: - POSTGRES_HOST=grafana-db - POSTGRES_USER=user @@ -56,8 +55,8 @@ services: environment: - ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db - - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI} - - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI} + - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-} + - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-} volumes: - ./config/test.node.config.toml:/app/config.toml - ./config/test.genesis.config.json:/app/genesis.json @@ -77,8 +76,8 @@ services: environment: - ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db - - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI} - - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI} + - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-} + - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-} - ZKEVM_NODE_ETHERMAN_URL=http://zkevm-v1tov2-l1-network:8545 volumes: - ./config/test.node.config.toml:/app/config.toml @@ -96,8 +95,8 @@ services: - ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db - ZKEVM_NODE_SEQUENCER_SENDER_ADDRESS=0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 - - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI} - - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI} + - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-} + - 
ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-} volumes: - ./sequencer.keystore:/pk/sequencer.keystore - ./config/test.node.config.toml:/app/config.toml @@ -114,8 +113,8 @@ services: - ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db - ZKEVM_NODE_SEQUENCER_SENDER_ADDRESS=0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 - - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI} - - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI} + - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-} + - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-} - ZKEVM_NODE_ETHERMAN_URL=http://zkevm-v1tov2-l1-network:8545 volumes: - ./sequencer.keystore:/pk/sequencer.keystore @@ -136,8 +135,8 @@ services: environment: - ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db - - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI} - - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI} + - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-} + - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-} volumes: - ./config/test.node.config.toml:/app/config.toml - ./config/test.genesis.config.json:/app/genesis.json @@ -156,8 +155,8 @@ services: environment: - ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db - - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI} - - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI} + - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-} + - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-} - ZKEVM_NODE_ETHERMAN_URL=http://zkevm-v1tov2-l1-network:8545 volumes: - ./config/test.node.config.toml:/app/config.toml @@ -210,8 +209,8 @@ services: - 9095:9091 # needed if metrics enabled environment: - ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db - - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI} - - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI} + - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-} + - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-} volumes: - ./config/test.node.config.toml:/app/config.toml - ./config/test.genesis.config.json:/app/genesis.json @@ -227,8 +226,8 @@ services: - 9095:9091 # needed if metrics enabled environment: - ZKEVM_NODE_STATE_DB_HOST=zkevm-state-db - - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI} - - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI} + - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-} + - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-} - ZKEVM_NODE_ETHERMAN_URL=http://zkevm-v1tov2-l1-network:8545 volumes: - ./config/test.node.config.toml:/app/config.toml @@ -430,8 +429,8 @@ services: - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db - ZKEVM_NODE_RPC_PORT=8124 - ZKEVM_NODE_RPC_WEBSOCKETS_PORT=8134 - - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI} - - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI} + - ZKEVM_NODE_MTCLIENT_URI=${ZKEVM_NODE_MTCLIENT_URI:-} + - ZKEVM_NODE_EXECUTOR_URI=${ZKEVM_NODE_EXECUTOR_URI:-} volumes: - ./config/test.node.config.toml:/app/config.toml - ./config/test.genesis.config.json:/app/genesis.json
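
Note (illustrative, not part of the patch above): the new state.batch.high_reserved_counters column is handled as JSON, mirroring the existing batch_resources handling — UpdateWIPBatch marshals receipt.HighReservedZKCounters and binds the resulting string as the $7 parameter of the UPDATE, while scanBatch reads the column into a []byte and unmarshals it only when it is non-NULL, so rows created before the column existed decode to zero-value counters. The standalone sketch below shows that round trip in isolation; the counters type and its fields are illustrative stand-ins, not the node's real ZK counters type.

package main

import (
    "encoding/json"
    "fmt"
)

// counters is an illustrative stand-in for the value persisted in
// state.batch.high_reserved_counters; the field names are examples only.
type counters struct {
    GasUsed uint64
    Steps   uint32
}

func main() {
    // Write path (as in UpdateWIPBatch): marshal to JSON and pass the string
    // as the bind parameter for the JSONB column.
    high := counters{GasUsed: 1_000_000, Steps: 42}
    raw, err := json.Marshal(high)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(raw)) // value that would be bound as $7

    // Read path (as in scanBatch): scan into a []byte and unmarshal only when
    // the column is non-NULL; a NULL column simply leaves the zero value.
    var scanned []byte = raw // would be nil for a NULL column
    var decoded counters
    if scanned != nil {
        if err := json.Unmarshal(scanned, &decoded); err != nil {
            panic(err)
        }
    }
    fmt.Printf("%+v\n", decoded)
}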