renamed config vars for L1 parallel sync #2768

Merged
4 changes: 2 additions & 2 deletions cmd/run.go
@@ -304,8 +304,8 @@ func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManagerS

etherManForL1 := []synchronizer.EthermanInterface{}
// If synchronizer are using sequential mode, we only need one etherman client
if cfg.Synchronizer.UseParallelModeForL1Synchronization {
for i := 0; i < int(cfg.Synchronizer.L1ParallelSynchronization.NumberOfParallelOfEthereumClients); i++ {
if cfg.Synchronizer.L1SynchronizationMode == synchronizer.ParallelMode {
for i := 0; i < int(cfg.Synchronizer.L1ParallelSynchronization.MaxClients+1); i++ {
eth, err := newEtherman(cfg)
if err != nil {
log.Fatal(err)
16 changes: 12 additions & 4 deletions config/config_test.go
@@ -40,6 +40,18 @@ func Test_Defaults(t *testing.T) {
path: "Synchronizer.SyncChunkSize",
expectedValue: uint64(100),
},
{
path: "Synchronizer.L1SynchronizationMode",
expectedValue: "parallel",
},
{
path: "Synchronizer.L1ParallelSynchronization.MaxClients",
expectedValue: uint64(10),
},
{
path: "Synchronizer.L1ParallelSynchronization.MaxPendingNoProcessedBlocks",
expectedValue: uint64(25),
},
{
path: "Sequencer.WaitPeriodPoolIsEmpty",
expectedValue: types.NewDuration(1 * time.Second),
@@ -489,10 +501,6 @@ func Test_Defaults(t *testing.T) {
path: "State.Batch.Constraints.MaxBinaries",
expectedValue: uint32(473170),
},
{
path: "State.Batch.Constraints.MaxSteps",
expectedValue: uint32(7570538),
},
}
file, err := os.CreateTemp("", "genesisConfig")
require.NoError(t, err)
26 changes: 13 additions & 13 deletions config/default.go
@@ -98,20 +98,20 @@ EnableHttpLog = true
SyncInterval = "1s"
SyncChunkSize = 100
TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc
UseParallelModeForL1Synchronization = true
L1SynchronizationMode = "parallel"
[Synchronizer.L1ParallelSynchronization]
NumberOfParallelOfEthereumClients = 10
CapacityOfBufferingRollupInfoFromL1 = 25
TimeForCheckLastBlockOnL1Time = "5s"
TimeoutForRequestLastBlockOnL1 = "5s"
MaxNumberOfRetriesForRequestLastBlockOnL1 = 3
TimeForShowUpStatisticsLog = "5m"
TimeOutMainLoop = "5m"
MinTimeBetweenRetriesForRollupInfo = "5s"
[Synchronizer.L1ParallelSynchronization.PerformanceCheck]
AcceptableTimeWaitingForNewRollupInfo = "5s"
NumIterationsBeforeStartCheckingTimeWaitinfForNewRollupInfo = 10
SwitchToSequentialModeIfIsSynchronized = false
MaxClients = 10
MaxPendingNoProcessedBlocks = 25
RequestLastBlockPeriod = "5s"
RequestLastBlockTimeout = "5s"
RequestLastBlockMaxRetries = 3
StatisticsPeriod = "5m"
TimeoutMainLoop = "5m"
RollupInfoRetriesSpacing= "5s"
FallbackToSequentialModeOnSynchronized = false
[Synchronizer.L1ParallelSynchronization.PerformanceWarning]
AceptableInacctivityTime = "5s"
ApplyAfterNumRollupReceived = 10
[Sequencer]
WaitPeriodPoolIsEmpty = "1s"
22 changes: 11 additions & 11 deletions docs/config-file/node-config-doc.html

Large diffs are not rendered by default.

135 changes: 69 additions & 66 deletions docs/config-file/node-config-doc.md

Large diffs are not rendered by default.

56 changes: 30 additions & 26 deletions docs/config-file/node-config-schema.json
@@ -458,74 +458,78 @@
"description": "TrustedSequencerURL is the rpc url to connect and sync the trusted state",
"default": ""
},
"UseParallelModeForL1Synchronization": {
"type": "boolean",
"description": "L1ParallelSynchronization Use new L1 synchronization that do in parallel request to L1 and process the data\nIf false use the legacy sequential mode",
"default": true
"L1SynchronizationMode": {
"type": "string",
"enum": [
"sequential",
"parallel"
],
"description": "L1SynchronizationMode define how to synchronize with L1:\n- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data\n- sequential: Request data to L1 and execute",
"default": "parallel"
},
"L1ParallelSynchronization": {
"properties": {
"NumberOfParallelOfEthereumClients": {
"MaxClients": {
"type": "integer",
"description": "NumberOfParallelOfEthereumClients Number of clients used to synchronize with L1\n(if UseParallelModeForL1Synchronization is true)",
"description": "MaxClients Number of clients used to synchronize with L1",
"default": 10
},
"CapacityOfBufferingRollupInfoFromL1": {
"MaxPendingNoProcessedBlocks": {
"type": "integer",
"description": "CapacityOfBufferingRollupInfoFromL1 Size of the buffer used to store rollup information from L1, must be \u003e= to NumberOfEthereumClientsToSync\nsugested twice of NumberOfParallelOfEthereumClients\n(if UseParallelModeForL1Synchronization is true)",
"description": "MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be \u003e= to NumberOfEthereumClientsToSync\nsugested twice of NumberOfParallelOfEthereumClients",
"default": 25
},
"TimeForCheckLastBlockOnL1Time": {
"RequestLastBlockPeriod": {
"type": "string",
"title": "Duration",
"description": "TimeForCheckLastBlockOnL1Time is the time to wait to request the\nlast block to L1 to known if we need to retrieve more data.\nThis value only apply when the system is synchronized",
"description": "RequestLastBlockPeriod is the time to wait to request the\nlast block to L1 to known if we need to retrieve more data.\nThis value only apply when the system is synchronized",
"default": "5s",
"examples": [
"1m",
"300ms"
]
},
"PerformanceCheck": {
"PerformanceWarning": {
"properties": {
"AcceptableTimeWaitingForNewRollupInfo": {
"AceptableInacctivityTime": {
"type": "string",
"title": "Duration",
"description": "AcceptableTimeWaitingForNewRollupInfo is the expected maximum time that the consumer\ncould wait until new data is produced. If the time is greater it emmit a log to warn about\nthat. The idea is keep working the consumer as much as possible, so if the producer is not\nfast enought then you could increse the number of parallel clients to sync with L1",
"description": "AceptableInacctivityTime is the expected maximum time that the consumer\ncould wait until new data is produced. If the time is greater it emmit a log to warn about\nthat. The idea is keep working the consumer as much as possible, so if the producer is not\nfast enought then you could increse the number of parallel clients to sync with L1",
"default": "5s",
"examples": [
"1m",
"300ms"
]
},
"NumIterationsBeforeStartCheckingTimeWaitinfForNewRollupInfo": {
"ApplyAfterNumRollupReceived": {
"type": "integer",
"description": "NumIterationsBeforeStartCheckingTimeWaitinfForNewRollupInfo is the number of iterations to\nstart checking the time waiting for new rollup info data",
"description": "ApplyAfterNumRollupReceived is the number of iterations to\nstart checking the time waiting for new rollup info data",
"default": 10
}
},
"additionalProperties": false,
"type": "object",
"description": "Consumer Configuration for the consumer of rollup information from L1"
},
"TimeoutForRequestLastBlockOnL1": {
"RequestLastBlockTimeout": {
"type": "string",
"title": "Duration",
"description": "TimeoutForRequestLastBlockOnL1 Timeout for request LastBlock On L1",
"description": "RequestLastBlockTimeout Timeout for request LastBlock On L1",
"default": "5s",
"examples": [
"1m",
"300ms"
]
},
"MaxNumberOfRetriesForRequestLastBlockOnL1": {
"RequestLastBlockMaxRetries": {
"type": "integer",
"description": "MaxNumberOfRetriesForRequestLastBlockOnL1 Max number of retries to request LastBlock On L1",
"description": "RequestLastBlockMaxRetries Max number of retries to request LastBlock On L1",
"default": 3
},
"TimeForShowUpStatisticsLog": {
"StatisticsPeriod": {
"type": "string",
"title": "Duration",
"description": "TimeForShowUpStatisticsLog how ofter show a log with statistics (0 is disabled)",
"description": "StatisticsPeriod how ofter show a log with statistics (0 is disabled)",
"default": "5m0s",
"examples": [
"1m",
@@ -542,25 +546,25 @@
"300ms"
]
},
"MinTimeBetweenRetriesForRollupInfo": {
"RollupInfoRetriesSpacing": {
"type": "string",
"title": "Duration",
"description": "MinTimeBetweenRetriesForRollupInfo is the minimum time between retries to request rollup info (it will sleep for fulfill this time) to avoid spamming L1",
"description": "RollupInfoRetriesSpacing is the minimum time between retries to request rollup info (it will sleep for fulfill this time) to avoid spamming L1",
"default": "5s",
"examples": [
"1m",
"300ms"
]
},
"SwitchToSequentialModeIfIsSynchronized": {
"FallbackToSequentialModeOnSynchronized": {
"type": "boolean",
"description": "SwitchToSequentialModeIfIsSynchronized if true switch to sequential mode if the system is synchronized",
"description": "FallbackToSequentialModeOnSynchronized if true switch to sequential mode if the system is synchronized",
"default": false
}
},
"additionalProperties": false,
"type": "object",
"description": "L1ParallelSynchronization Configuration for parallel mode (if UseParallelModeForL1Synchronization is true)"
"description": "L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel')"
}
},
"additionalProperties": false,
41 changes: 20 additions & 21 deletions docs/design/synchronizer/l1_synchronization.md
@@ -10,43 +10,44 @@ This is a refactor of L1 synchronization to improve speed.
You can choose between the new L1 parallel sync and the legacy sequential one:
```
[Synchronizer]
UseParallelModeForL1Synchronization = false
L1SynchronizationMode = "parallel"
```
If you activate this feature you can configure:
- `NumberOfParallelOfEthereumClients`: how many parallel request can be done. You must consider that 1 is just for requesting the last block on L1, and the rest for rollup info
- `CapacityOfBufferingRollupInfoFromL1`: buffer of data pending to be processed. This is the queue data to be executed by consumer.
- `MaxClients`: how many parallel requests can be made. Keep in mind that one of them is dedicated to requesting the last block on L1, and the rest fetch rollup info.
- `MaxPendingNoProcessedBlocks`: size of the buffer of data pending to be processed. This is the queue of data waiting to be executed by the consumer.

For a full description of all fields please check the config-file documentation.

Example:
```
UseParallelModeForL1Synchronization = true
L1SynchronizationMode = "parallel"
[Synchronizer.L1ParallelSynchronization]
NumberOfParallelOfEthereumClients = 2
CapacityOfBufferingRollupInfoFromL1 = 10
TimeForCheckLastBlockOnL1Time = "5s"
TimeoutForRequestLastBlockOnL1 = "5s"
MaxNumberOfRetriesForRequestLastBlockOnL1 = 3
TimeForShowUpStatisticsLog = "5m"
TimeOutMainLoop = "5m"
MinTimeBetweenRetriesForRollupInfo = "5s"
[Synchronizer.L1ParallelSynchronization.PerformanceCheck]
AcceptableTimeWaitingForNewRollupInfo = "5s"
NumIterationsBeforeStartCheckingTimeWaitinfForNewRollupInfo = 10
MaxClients = 10
MaxPendingNoProcessedBlocks = 25
RequestLastBlockPeriod = "5s"
RequestLastBlockTimeout = "5s"
RequestLastBlockMaxRetries = 3
StatisticsPeriod = "5m"
TimeoutMainLoop = "5m"
RollupInfoRetriesSpacing= "5s"
FallbackToSequentialModeOnSynchronized = false
[Synchronizer.L1ParallelSynchronization.PerformanceWarning]
AceptableInacctivityTime = "5s"
ApplyAfterNumRollupReceived = 10
```
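The two sizing fields are related: the config schema notes that `MaxPendingNoProcessedBlocks` must be at least as large as the number of syncing clients, and twice `MaxClients` is the suggested value. Below is a minimal, self-contained Go sketch of that sanity check; the helper is illustrative and not part of the node.
```
package main

import (
	"fmt"
	"log"
)

// checkL1ParallelSizing is an illustrative helper (not part of zkevm-node):
// the pending-blocks buffer must hold at least as many items as there are
// clients, and twice MaxClients is the suggested size.
func checkL1ParallelSizing(maxClients, maxPendingNoProcessedBlocks uint64) error {
	if maxPendingNoProcessedBlocks < maxClients {
		return fmt.Errorf("MaxPendingNoProcessedBlocks (%d) must be >= MaxClients (%d)",
			maxPendingNoProcessedBlocks, maxClients)
	}
	if maxPendingNoProcessedBlocks < 2*maxClients {
		log.Printf("suggestion: raise MaxPendingNoProcessedBlocks to %d (twice MaxClients)", 2*maxClients)
	}
	return nil
}

func main() {
	// Defaults from config/default.go: MaxClients = 10, MaxPendingNoProcessedBlocks = 25.
	if err := checkL1ParallelSizing(10, 25); err != nil {
		log.Fatal(err)
	}
}
```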
## Noteworthy logs
### How to know the occupation of the executor
To check whether the executor is fully occupied, look at the following log:
```
INFO synchronizer/l1_rollup_info_consumer.go:128 consumer: processing rollupInfo #1553: range:[8720385, 8720485] num_blocks [37] statistics:wasted_time_waiting_for_data [0s] last_process_time [6m2.635208117s] block_per_second [2.766837]
consumer: processing rollupInfo #808: range:[9606297, 9606397] num_blocks [7] statistics:wasted_time_waiting_for_data [0s] last_process_time [27.557166427s] block_per_second [0.318281]
```
The `wasted_time_waiting_for_data` show the waiting time between this call and the previous to executor. It could show a warning configuring `Synchronizer.L1ParallelSynchronization.PerformanceCheck`
The `wasted_time_waiting_for_data` field shows how long the consumer waited for new data between the previous call to the executor and this one. It can trigger a warning depending on the `Synchronizer.L1ParallelSynchronization.PerformanceWarning` configuration.
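In other words, `PerformanceWarning` acts as a simple watchdog on the consumer's idle time. Below is a rough sketch of the idea based on the field descriptions above; it is illustrative only, not the node's actual implementation.
```
package main

import (
	"log"
	"time"
)

// idleWatchdog is an illustrative model of PerformanceWarning: after
// ApplyAfterNumRollupReceived packages have been received, it warns whenever
// the consumer waited longer than AceptableInacctivityTime for new data.
type idleWatchdog struct {
	aceptableInacctivityTime    time.Duration
	applyAfterNumRollupReceived int
	received                    int
}

func (w *idleWatchdog) onRollupInfo(waited time.Duration) {
	w.received++
	if w.received <= w.applyAfterNumRollupReceived {
		return // warm-up: too few packages received to judge the producer yet
	}
	if waited > w.aceptableInacctivityTime {
		log.Printf("consumer waited %s for rollup info (acceptable %s); consider raising MaxClients",
			waited, w.aceptableInacctivityTime)
	}
}

func main() {
	w := &idleWatchdog{aceptableInacctivityTime: 5 * time.Second, applyAfterNumRollupReceived: 10}
	for i := 0; i < 12; i++ {
		w.onRollupInfo(6 * time.Second) // simulated wait before each package
	}
}
```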

### Estimated time to be fully synchronized with L1
This log show the estimated time (**ETA**) to reach the block goal. You can configure the frequency with var `TimeForShowUpStatisticsLog`
This log shows the estimated time (**ETA**) to reach the target block. You can configure its frequency with `StatisticsPeriod`.
```
INFO synchronizer/l1_rollup_info_producer.go:357 producer: Statistics:ETA: 54h7m47.594422312s percent:12.26 blocks_per_seconds:5.48 pending_block:149278/1217939 num_errors:8
INFO producer: Statistics: EstimatedTimeOfArrival: 1h58m42.730543611s percent:0.15 blocks_per_seconds:201.24 pending_block:2222/1435629 num_errors:0
```

## Flow of data
@@ -59,5 +60,3 @@ INFO synchronizer/l1_rollup_info_producer.go:357 producer: Statistics:ETA: 54h7m
- `l1RollupInfoConsumer`: receives the data and executes it


## Future changes
- Configure multiples servers for L1 information: instead of calling the same server,it make sense to configure individually each URL to allow to have multiples sources
59 changes: 29 additions & 30 deletions synchronizer/config.go
@@ -13,53 +13,52 @@ type Config struct {
// TrustedSequencerURL is the rpc url to connect and sync the trusted state
TrustedSequencerURL string `mapstructure:"TrustedSequencerURL"`

// L1ParallelSynchronization Use new L1 synchronization that do in parallel request to L1 and process the data
// If false use the legacy sequential mode
UseParallelModeForL1Synchronization bool `mapstructure:"UseParallelModeForL1Synchronization"`
// L1ParallelSynchronization Configuration for parallel mode (if UseParallelModeForL1Synchronization is true)
L1ParallelSynchronization L1ParallelSynchronizationConfig `mapstructure:"L1ParallelSynchronization"`
// L1SynchronizationMode define how to synchronize with L1:
// - parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
// - sequential: Request data to L1 and execute
L1SynchronizationMode string `jsonschema:"enum=sequential,enum=parallel"`
// L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel')
L1ParallelSynchronization L1ParallelSynchronizationConfig
}

// L1ParallelSynchronizationConfig Configuration for parallel mode (if UseParallelModeForL1Synchronization is true)
// L1ParallelSynchronizationConfig Configuration for parallel mode (if UL1SynchronizationMode equal to 'parallel')
type L1ParallelSynchronizationConfig struct {
// NumberOfParallelOfEthereumClients Number of clients used to synchronize with L1
// (if UseParallelModeForL1Synchronization is true)
NumberOfParallelOfEthereumClients uint64 `mapstructue:"NumberOfParallelOfEthereumClients"`
// CapacityOfBufferingRollupInfoFromL1 Size of the buffer used to store rollup information from L1, must be >= to NumberOfEthereumClientsToSync
// MaxClients Number of clients used to synchronize with L1
MaxClients uint64
// MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be >= to NumberOfEthereumClientsToSync
// sugested twice of NumberOfParallelOfEthereumClients
// (if UseParallelModeForL1Synchronization is true)
CapacityOfBufferingRollupInfoFromL1 uint64 `mapstructure:"CapacityOfBufferingRollupInfoFromL1"`
MaxPendingNoProcessedBlocks uint64

// TimeForCheckLastBlockOnL1Time is the time to wait to request the
// RequestLastBlockPeriod is the time to wait to request the
// last block to L1 to known if we need to retrieve more data.
// This value only apply when the system is synchronized
TimeForCheckLastBlockOnL1Time types.Duration `mapstructure:"TimeForCheckLastBlockOnL1Time"`
RequestLastBlockPeriod types.Duration

// Consumer Configuration for the consumer of rollup information from L1
PerformanceCheck L1PerformanceCheckConfig `mapstructure:"PerformanceCheck"`
PerformanceWarning L1PerformanceCheckConfig

// TimeoutForRequestLastBlockOnL1 Timeout for request LastBlock On L1
TimeoutForRequestLastBlockOnL1 types.Duration `mapstructure:"TimeoutForRequestLastBlockOnL1"`
// MaxNumberOfRetriesForRequestLastBlockOnL1 Max number of retries to request LastBlock On L1
MaxNumberOfRetriesForRequestLastBlockOnL1 int `mapstructure:"MaxNumberOfRetriesForRequestLastBlockOnL1"`
// TimeForShowUpStatisticsLog how ofter show a log with statistics (0 is disabled)
TimeForShowUpStatisticsLog types.Duration `mapstructure:"TimeForShowUpStatisticsLog"`
// RequestLastBlockTimeout Timeout for request LastBlock On L1
RequestLastBlockTimeout types.Duration
// RequestLastBlockMaxRetries Max number of retries to request LastBlock On L1
RequestLastBlockMaxRetries int
// StatisticsPeriod how ofter show a log with statistics (0 is disabled)
StatisticsPeriod types.Duration
// TimeOutMainLoop is the timeout for the main loop of the L1 synchronizer when is not updated
TimeOutMainLoop types.Duration `mapstructure:"TimeOutMainLoop"`
// MinTimeBetweenRetriesForRollupInfo is the minimum time between retries to request rollup info (it will sleep for fulfill this time) to avoid spamming L1
MinTimeBetweenRetriesForRollupInfo types.Duration `mapstructure:"MinTimeBetweenRetriesForRollupInfo"`
// SwitchToSequentialModeIfIsSynchronized if true switch to sequential mode if the system is synchronized
SwitchToSequentialModeIfIsSynchronized bool `mapstructure:"SwitchToSequentialModeIfIsSynchronized"`
TimeOutMainLoop types.Duration
// RollupInfoRetriesSpacing is the minimum time between retries to request rollup info (it will sleep for fulfill this time) to avoid spamming L1
RollupInfoRetriesSpacing types.Duration
// FallbackToSequentialModeOnSynchronized if true switch to sequential mode if the system is synchronized
FallbackToSequentialModeOnSynchronized bool
}

// L1PerformanceCheckConfig Configuration for the consumer of rollup information from L1
type L1PerformanceCheckConfig struct {
// AcceptableTimeWaitingForNewRollupInfo is the expected maximum time that the consumer
// AceptableInacctivityTime is the expected maximum time that the consumer
// could wait until new data is produced. If the time is greater it emmit a log to warn about
// that. The idea is keep working the consumer as much as possible, so if the producer is not
// fast enought then you could increse the number of parallel clients to sync with L1
AcceptableTimeWaitingForNewRollupInfo types.Duration `mapstructure:"AcceptableTimeWaitingForNewRollupInfo"`
// NumIterationsBeforeStartCheckingTimeWaitinfForNewRollupInfo is the number of iterations to
AceptableInacctivityTime types.Duration
// ApplyAfterNumRollupReceived is the number of iterations to
// start checking the time waiting for new rollup info data
NumIterationsBeforeStartCheckingTimeWaitinfForNewRollupInfo int `mapstructure:"NumIterationsBeforeStartCheckingTimeWaitinfForNewRollupInfo"`
ApplyAfterNumRollupReceived int
}
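For reference, here is a hedged sketch (not part of the PR) that assembles the renamed fields with the default values from `config/default.go`. Import paths assume the zkevm-node module layout, and `types.NewDuration` is the same helper already used in `config_test.go`.
```
package main

import (
	"time"

	"github.com/0xPolygonHermez/zkevm-node/config/types"
	"github.com/0xPolygonHermez/zkevm-node/synchronizer"
)

func main() {
	// Parallel-mode defaults, mirroring config/default.go with the new names.
	cfg := synchronizer.Config{
		L1SynchronizationMode: "parallel",
		L1ParallelSynchronization: synchronizer.L1ParallelSynchronizationConfig{
			MaxClients:                             10,
			MaxPendingNoProcessedBlocks:            25,
			RequestLastBlockPeriod:                 types.NewDuration(5 * time.Second),
			RequestLastBlockTimeout:                types.NewDuration(5 * time.Second),
			RequestLastBlockMaxRetries:             3,
			StatisticsPeriod:                       types.NewDuration(5 * time.Minute),
			TimeOutMainLoop:                        types.NewDuration(5 * time.Minute),
			RollupInfoRetriesSpacing:               types.NewDuration(5 * time.Second),
			FallbackToSequentialModeOnSynchronized: false,
			PerformanceWarning: synchronizer.L1PerformanceCheckConfig{
				AceptableInacctivityTime:    types.NewDuration(5 * time.Second),
				ApplyAfterNumRollupReceived: 10,
			},
		},
	}
	_ = cfg // pass cfg to the synchronizer setup, e.g. in cmd/run.go
}
```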