Skip to content

Commit

Permalink
Merge branch 'develop' into feature/jrpc-log-limit
Browse files — browse the repository at this point in the history
  • Loading branch information
tclemos committed Oct 4, 2023
2 parents a942b45 + 4dfabaf commit 17cc597
Show file tree
Hide file tree
Showing 62 changed files with 1,606 additions and 349 deletions.
10 changes: 6 additions & 4 deletions .github/workflows/jsonschema.yml
Original file line number Diff line number Diff line change
Expand Up @@ -32,11 +32,13 @@ jobs:
env:
GOARCH: ${{ matrix.goarch }}

- uses: actions/setup-python@v1
- uses: BSFishy/pip-action@v1
- uses: actions/setup-python@v4
with:
packages: |
json-schema-for-humans
python-version: '3.10'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install json-schema-for-humans
- name: Check if JSON schema and generated doc is up to date
run: |
Expand Down
8 changes: 7 additions & 1 deletion cmd/run.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ import (
"runtime"
"time"

datastreamerlog "github.com/0xPolygonHermez/zkevm-data-streamer/log"
"github.com/0xPolygonHermez/zkevm-node"
"github.com/0xPolygonHermez/zkevm-node/aggregator"
"github.com/0xPolygonHermez/zkevm-node/config"
Expand Down Expand Up @@ -158,6 +159,11 @@ func start(cliCtx *cli.Context) error {
}
go runAggregator(cliCtx.Context, c.Aggregator, etherman, etm, st)
case SEQUENCER:
c.Sequencer.StreamServer.Log = datastreamerlog.Config{
Environment: datastreamerlog.LogEnvironment(c.Log.Environment),
Level: c.Log.Level,
Outputs: c.Log.Outputs,
}
ev.Component = event.Component_Sequencer
ev.Description = "Running sequencer"
err := eventLog.LogEvent(cliCtx.Context, ev)
Expand Down Expand Up @@ -309,7 +315,7 @@ func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManager
}
sy, err := synchronizer.NewSynchronizer(
cfg.IsTrustedSequencer, etherman, etherManForL1, st, pool, ethTxManager,
zkEVMClient, eventLog, cfg.NetworkConfig.Genesis, cfg.Synchronizer,
zkEVMClient, eventLog, cfg.NetworkConfig.Genesis, cfg.Synchronizer, cfg.Log.Environment == "development",
)
if err != nil {
log.Fatal(err)
Expand Down
12 changes: 12 additions & 0 deletions config/config_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,18 @@ func Test_Defaults(t *testing.T) {
path: "Sequencer.DBManager.L2ReorgRetrievalInterval",
expectedValue: types.NewDuration(5 * time.Second),
},
{
path: "Sequencer.StreamServer.Port",
expectedValue: uint16(0),
},
{
path: "Sequencer.StreamServer.Filename",
expectedValue: "",
},
{
path: "Sequencer.StreamServer.Enabled",
expectedValue: false,
},
{
path: "SequenceSender.WaitPeriodSendSequence",
expectedValue: types.NewDuration(5 * time.Second),
Expand Down
6 changes: 5 additions & 1 deletion config/default.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,6 @@ WriteTimeout = "60s"
MaxRequestsPerIPAndSecond = 500
SequencerNodeURI = ""
EnableL2SuggestedGasPricePolling = true
TraceBatchUseHTTPS = true
BatchRequestsEnabled = false
BatchRequestsLimit = 20
MaxLogsCount = 10000
Expand All @@ -99,6 +98,7 @@ UseParallelModeForL1Synchronization = true
MaxNumberOfRetriesForRequestLastBlockOnL1 = 3
TimeForShowUpStatisticsLog = "5m"
TimeOutMainLoop = "5m"
MinTimeBetweenRetriesForRollupInfo = "5s"
[Synchronizer.L1ParallelSynchronization.PerformanceCheck]
AcceptableTimeWaitingForNewRollupInfo = "5s"
NumIterationsBeforeStartCheckingTimeWaitinfForNewRollupInfo = 10
Expand Down Expand Up @@ -131,6 +131,10 @@ MaxTxLifetime = "3h"
ByteGasCost = 16
MarginFactor = 1
Enabled = false
[Sequencer.StreamServer]
Port = 0
Filename = ""
Enabled = false
[SequenceSender]
WaitPeriodSendSequence = "5s"
Expand Down
4 changes: 4 additions & 0 deletions config/environments/local/local.node.config.toml
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,10 @@ MaxTxLifetime = "3h"
ByteGasCost = 16
MarginFactor = 1
Enabled = false
[Sequencer.StreamServer]
Port = 0
Filename = ""
Enabled = false

[SequenceSender]
WaitPeriodSendSequence = "5s"
Expand Down
73 changes: 34 additions & 39 deletions config/gen_json_schema.go
Original file line number Diff line number Diff line change
Expand Up @@ -201,37 +201,35 @@ func fillDefaultValuesPartial(schema *jsonschema.Schema, default_config interfac
if schema.Properties == nil {
return
}
for _, key := range schema.Properties.Keys() {
for pair := schema.Properties.Oldest(); pair != nil; pair = pair.Next() {
key := pair.Key
value_schema := pair.Value
log.Debugf("fillDefaultValuesPartial: key: %s", key)
value, ok := schema.Properties.Get(key)
if ok {
value_schema, _ := value.(*jsonschema.Schema)
default_value := getValueFromStruct(default_config, key)
if default_value.IsValid() && variantFieldIsSet(&value_schema.Default) {
switch value_schema.Type {
case "array":
if default_value.Kind() == reflect.ValueOf(common.Address{}).Kind() {
if !default_value.IsZero() {
def_value := default_value.Interface()
value_schema.Default = def_value
}
} else {
if !default_value.IsZero() && !default_value.IsNil() {
def_value := default_value.Interface()
value_schema.Default = def_value
}
default_value := getValueFromStruct(default_config, key)
if default_value.IsValid() && variantFieldIsSet(&value_schema.Default) {
switch value_schema.Type {
case "array":
if default_value.Kind() == reflect.ValueOf(common.Address{}).Kind() {
if !default_value.IsZero() {
def_value := default_value.Interface()
value_schema.Default = def_value
}
} else {
if !default_value.IsZero() && !default_value.IsNil() {
def_value := default_value.Interface()
value_schema.Default = def_value
}
case "object":
fillDefaultValuesPartial(value_schema, default_value.Interface())
default: // string, number, integer, boolean
if default_value.Type() == reflect.TypeOf(types.Duration{}) {
duration, ok := default_value.Interface().(types.Duration)
if ok {
value_schema.Default = duration.String()
}
} else {
value_schema.Default = default_value.Interface()
}
case "object":
fillDefaultValuesPartial(value_schema, default_value.Interface())
default: // string, number, integer, boolean
if default_value.Type() == reflect.TypeOf(types.Duration{}) {
duration, ok := default_value.Interface().(types.Duration)
if ok {
value_schema.Default = duration.String()
}
} else {
value_schema.Default = default_value.Interface()
}
}
}
Expand All @@ -243,17 +241,14 @@ func cleanRequiredFields(schema *jsonschema.Schema) {
if schema.Properties == nil {
return
}
for _, key := range schema.Properties.Keys() {
value, ok := schema.Properties.Get(key)
if ok {
value_schema, _ := value.(*jsonschema.Schema)
value_schema.Required = []string{}
switch value_schema.Type {
case "object":
cleanRequiredFields(value_schema)
case "array":
cleanRequiredFields(value_schema.Items)
}
for pair := schema.Properties.Oldest(); pair != nil; pair = pair.Next() {
value_schema := pair.Value
value_schema.Required = []string{}
switch value_schema.Type {
case "object":
cleanRequiredFields(value_schema)
case "array":
cleanRequiredFields(value_schema.Items)
}
}
}
8 changes: 1 addition & 7 deletions config/gen_json_schema_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -377,16 +377,10 @@ func getValueFromSchema(schema *jsonschema.Schema, keys []string) (*jsonschema.S

for _, key := range keys {
v, exist := subschema.Properties.Get(key)

if !exist {
return nil, errors.New("key " + key + " doesnt exist in schema")
}

new_schema, ok := v.(*jsonschema.Schema)
if !ok {
return nil, errors.New("fails conversion for key " + key + " doesnt exist in schema")
}
subschema = new_schema
subschema = v
}
return subschema, nil
}
14 changes: 0 additions & 14 deletions db/migrations/state/0010.sql
Original file line number Diff line number Diff line change
@@ -1,19 +1,5 @@
-- +migrate Up
CREATE INDEX IF NOT EXISTS l2block_created_at_idx ON state.l2block (created_at);
CREATE INDEX IF NOT EXISTS l2block_block_hash_idx ON state.l2block (block_hash);

CREATE INDEX IF NOT EXISTS log_log_index_idx ON state.log (log_index);
CREATE INDEX IF NOT EXISTS log_topic0_idx ON state.log (topic0);
CREATE INDEX IF NOT EXISTS log_topic1_idx ON state.log (topic1);
CREATE INDEX IF NOT EXISTS log_topic2_idx ON state.log (topic2);
CREATE INDEX IF NOT EXISTS log_topic3_idx ON state.log (topic3);

-- +migrate Down
DROP INDEX IF EXISTS state.l2block_created_at_idx;
DROP INDEX IF EXISTS state.l2block_block_hash_idx;

DROP INDEX IF EXISTS state.log_log_index_idx;
DROP INDEX IF EXISTS state.log_topic0_idx;
DROP INDEX IF EXISTS state.log_topic1_idx;
DROP INDEX IF EXISTS state.log_topic2_idx;
DROP INDEX IF EXISTS state.log_topic3_idx;
45 changes: 45 additions & 0 deletions db/migrations/state/0010_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
package migrations_test

import (
"database/sql"
"testing"

"github.com/stretchr/testify/assert"
)

// migrationTest0010 verifies state migration 0010, which creates the
// l2block_block_hash_idx index on state.l2block (and drops it again on
// migrate down). NOTE(review): the previous comment ("changes length of the
// token name") was copy-pasted from another migration and did not match the
// assertions below.
type migrationTest0010 struct{}

// InsertData seeds no fixture rows: migration 0010 only creates/drops an
// index, so the pg_indexes assertions below need no table data.
func (m migrationTest0010) InsertData(db *sql.DB) error {
	return nil
}

// RunAssertsAfterMigrationUp checks that the index created by migration 0010
// is present once the migration has been applied, by counting matching rows
// in the pg_indexes catalog view.
func (m migrationTest0010) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) {
	const countIndexQuery = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;`
	for _, indexName := range []string{"l2block_block_hash_idx"} {
		var found int
		err := db.QueryRow(countIndexQuery, indexName).Scan(&found)
		assert.NoError(t, err)
		assert.Equal(t, 1, found)
	}
}

// RunAssertsAfterMigrationDown checks that migrating down removes the index
// created by migration 0010 (no matching rows remain in pg_indexes).
func (m migrationTest0010) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) {
	const countIndexQuery = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;`
	for _, indexName := range []string{"l2block_block_hash_idx"} {
		var found int
		err := db.QueryRow(countIndexQuery, indexName).Scan(&found)
		assert.NoError(t, err)
		assert.Equal(t, 0, found)
	}
}

// TestMigration0010 drives the shared migration harness for migration
// number 10, using the up/down assertions defined on migrationTest0010.
func TestMigration0010(t *testing.T) {
	runMigrationTest(t, 10, migrationTest0010{})
}
17 changes: 17 additions & 0 deletions db/migrations/state/0011.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
-- +migrate Up
-- Index L2 blocks by creation timestamp.
CREATE INDEX IF NOT EXISTS l2block_created_at_idx ON state.l2block (created_at);

-- Index logs by log_index and by each of the four topic columns.
CREATE INDEX IF NOT EXISTS log_log_index_idx ON state.log (log_index);
CREATE INDEX IF NOT EXISTS log_topic0_idx ON state.log (topic0);
CREATE INDEX IF NOT EXISTS log_topic1_idx ON state.log (topic1);
CREATE INDEX IF NOT EXISTS log_topic2_idx ON state.log (topic2);
CREATE INDEX IF NOT EXISTS log_topic3_idx ON state.log (topic3);

-- +migrate Down
-- Drop every index created by the Up section above.
DROP INDEX IF EXISTS state.l2block_created_at_idx;

DROP INDEX IF EXISTS state.log_log_index_idx;
DROP INDEX IF EXISTS state.log_topic0_idx;
DROP INDEX IF EXISTS state.log_topic1_idx;
DROP INDEX IF EXISTS state.log_topic2_idx;
DROP INDEX IF EXISTS state.log_topic3_idx;
59 changes: 59 additions & 0 deletions db/migrations/state/0011_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
package migrations_test

import (
"database/sql"
"testing"

"github.com/stretchr/testify/assert"
)

// migrationTest0011 verifies state migration 0011, which creates the
// l2block_created_at_idx index plus the log_log_index/log_topic0..3 indexes
// (all dropped again on migrate down). NOTE(review): the previous comment
// ("changes length of the token name") was copy-pasted from another
// migration and did not match the assertions below.
type migrationTest0011 struct{}

// InsertData seeds no fixture rows: migration 0011 only creates/drops
// indexes, so the pg_indexes assertions below need no table data.
func (m migrationTest0011) InsertData(db *sql.DB) error {
	return nil
}

// RunAssertsAfterMigrationUp checks that every index created by migration
// 0011 (on state.l2block and state.log) is present once the migration has
// been applied, by counting matching rows in the pg_indexes catalog view.
func (m migrationTest0011) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) {
	const countIndexQuery = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;`
	createdIndexes := []string{
		"l2block_created_at_idx",
		"log_log_index_idx",
		"log_topic0_idx",
		"log_topic1_idx",
		"log_topic2_idx",
		"log_topic3_idx",
	}
	for _, indexName := range createdIndexes {
		var found int
		err := db.QueryRow(countIndexQuery, indexName).Scan(&found)
		assert.NoError(t, err)
		assert.Equal(t, 1, found)
	}
}

// RunAssertsAfterMigrationDown checks that migrating down removes every
// index created by migration 0011 (no matching rows remain in pg_indexes).
func (m migrationTest0011) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) {
	const countIndexQuery = `SELECT count(*) FROM pg_indexes WHERE indexname = $1;`
	droppedIndexes := []string{
		"l2block_created_at_idx",
		"log_log_index_idx",
		"log_topic0_idx",
		"log_topic1_idx",
		"log_topic2_idx",
		"log_topic3_idx",
	}
	for _, indexName := range droppedIndexes {
		var found int
		err := db.QueryRow(countIndexQuery, indexName).Scan(&found)
		assert.NoError(t, err)
		assert.Equal(t, 0, found)
	}
}

// TestMigration0011 drives the shared migration harness for migration
// number 11, using the up/down assertions defined on migrationTest0011.
func TestMigration0011(t *testing.T) {
	runMigrationTest(t, 11, migrationTest0011{})
}
Loading

0 comments on commit 17cc597

Please sign in to comment.