From fb19d799f57d4cd670df2f5e0231d9a301725140 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 29 Aug 2024 18:35:38 +0400 Subject: [PATCH 01/66] chain index complete for msgs and txns --- chain/events/events.go | 15 ++ chain/events/observer.go | 15 ++ chain/gen/gen.go | 4 +- chain/index/interface.go | 18 +- chain/index/msgindex.go | 31 +-- chain/index/msgindex_test.go | 8 +- chain/stmgr/forks_test.go | 10 +- chain/stmgr/searchwait.go | 8 +- chain/stmgr/stmgr.go | 12 +- chain/store/store_test.go | 4 +- chainindex/ddls.go | 47 ++++ chainindex/gc.go | 113 ++++++++++ chainindex/helpers.go | 108 +++++++++ chainindex/indexer.go | 300 +++++++++++++++++++++++++ chainindex/interface.go | 81 +++++++ chainindex/pub_sub.go | 59 +++++ chainindex/read.go | 155 +++++++++++++ cmd/lotus-bench/import.go | 4 +- cmd/lotus-shed/balances.go | 7 +- cmd/lotus-shed/gas-estimation.go | 7 +- cmd/lotus-shed/invariants.go | 4 +- cmd/lotus-shed/migrations.go | 4 +- cmd/lotus-shed/state-stats.go | 4 +- cmd/lotus-sim/simulation/node.go | 7 +- cmd/lotus-sim/simulation/simulation.go | 5 +- cmd/lotus/daemon.go | 28 +-- conformance/driver.go | 4 +- itests/eth_hash_lookup_test.go | 11 +- itests/msgindex_test.go | 2 +- lib/sqlite/sqlite.go | 1 + node/builder.go | 2 + node/builder_chain.go | 4 + node/config/doc_gen.go | 8 + node/config/types.go | 5 + node/impl/full/eth.go | 38 +++- node/modules/chain.go | 3 +- node/modules/chainindex.go | 62 +++++ node/modules/ethmodule.go | 37 +-- node/modules/msgindex.go | 2 +- node/modules/stmgr.go | 5 +- 40 files changed, 1127 insertions(+), 115 deletions(-) create mode 100644 chainindex/ddls.go create mode 100644 chainindex/gc.go create mode 100644 chainindex/helpers.go create mode 100644 chainindex/indexer.go create mode 100644 chainindex/interface.go create mode 100644 chainindex/pub_sub.go create mode 100644 chainindex/read.go create mode 100644 node/modules/chainindex.go diff --git a/chain/events/events.go b/chain/events/events.go index 1b6cb81b08b..0e8b95ff7a7 100644 --- a/chain/events/events.go +++ b/chain/events/events.go @@ -65,3 +65,18 @@ func NewEvents(ctx context.Context, api EventHelperAPI) (*Events, error) { gcConfidence := 2 * policy.ChainFinality return newEventsWithGCConfidence(ctx, api, gcConfidence) } + +func NewEventsWithHead(ctx context.Context, api EventHelperAPI, head *types.TipSet) (*Events, error) { + gcConfidence := 2 * policy.ChainFinality + cache := newCache(api, gcConfidence) + + ob := newObserverWithHead(cache, gcConfidence, head) + if err := ob.start(ctx); err != nil { + return nil, err + } + + he := newHeightEvents(cache, ob, gcConfidence) + headChange := newHCEvents(cache, ob) + + return &Events{ob, he, headChange}, nil +} diff --git a/chain/events/observer.go b/chain/events/observer.go index 0b021f9965b..15773898d00 100644 --- a/chain/events/observer.go +++ b/chain/events/observer.go @@ -41,6 +41,21 @@ func newObserver(api *cache, gcConfidence abi.ChainEpoch) *observer { return obs } +func newObserverWithHead(api *cache, gcConfidence abi.ChainEpoch, head *types.TipSet) *observer { + obs := &observer{ + api: api, + gcConfidence: gcConfidence, + head: head, + ready: make(chan struct{}), + observers: []TipSetObserver{}, + } + obs.Observe(api.observer()) + + close(obs.ready) // Close the ready channel since we already have a head + + return obs +} + func (o *observer) start(ctx context.Context) error { go o.listenHeadChanges(ctx) diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 435d942dc18..4ace34456c3 100644 --- a/chain/gen/gen.go +++ 
b/chain/gen/gen.go @@ -44,6 +44,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" "github.com/filecoin-project/lotus/genesis" "github.com/filecoin-project/lotus/journal" @@ -258,7 +259,8 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS //return nil, xerrors.Errorf("creating drand beacon: %w", err) //} - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds, + index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { return nil, xerrors.Errorf("initing stmgr: %w", err) } diff --git a/chain/index/interface.go b/chain/index/interface.go index f875a94bf79..ceb4cca7bc9 100644 --- a/chain/index/interface.go +++ b/chain/index/interface.go @@ -6,36 +6,26 @@ import ( "github.com/ipfs/go-cid" - "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chainindex" ) var ErrNotFound = errors.New("message not found") var ErrClosed = errors.New("index closed") -// MsgInfo is the Message metadata the index tracks. -type MsgInfo struct { - // the message this record refers to - Message cid.Cid - // the tipset where this message was included - TipSet cid.Cid - // the epoch where this message was included - Epoch abi.ChainEpoch -} - // MsgIndex is the interface to the message index type MsgIndex interface { // GetMsgInfo retrieves the message metadata through the index. // The lookup is done using the onchain message Cid; that is the signed message Cid // for SECP messages and unsigned message Cid for BLS messages. 
- GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) + GetMsgInfo(ctx context.Context, m cid.Cid) (*chainindex.MsgInfo, error) // Close closes the index Close() error } type dummyMsgIndex struct{} -func (dummyMsgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) { - return MsgInfo{}, ErrNotFound +func (dummyMsgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (*chainindex.MsgInfo, error) { + return nil, ErrNotFound } func (dummyMsgIndex) Close() error { diff --git a/chain/index/msgindex.go b/chain/index/msgindex.go index f5248f2782e..b3059656853 100644 --- a/chain/index/msgindex.go +++ b/chain/index/msgindex.go @@ -16,6 +16,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/lib/sqlite" ) @@ -88,7 +89,7 @@ type headChange struct { app []*types.TipSet } -func NewMsgIndex(lctx context.Context, path string, cs ChainStore) (MsgIndex, error) { +func NewMsgIndex(lctx context.Context, path string, cs ChainStore, enableWrites bool) (MsgIndex, error) { db, exists, err := sqlite.Open(path) if err != nil { return nil, xerrors.Errorf("failed to setup message index db: %w", err) @@ -124,13 +125,15 @@ func NewMsgIndex(lctx context.Context, path string, cs ChainStore) (MsgIndex, er return nil, xerrors.Errorf("error preparing msgindex database statements: %w", err) } - rnf := store.WrapHeadChangeCoalescer( - msgIndex.onHeadChange, - CoalesceMinDelay, - CoalesceMaxDelay, - CoalesceMergeInterval, - ) - cs.SubscribeHeadChanges(rnf) + if enableWrites { + rnf := store.WrapHeadChangeCoalescer( + msgIndex.onHeadChange, + CoalesceMinDelay, + CoalesceMaxDelay, + CoalesceMergeInterval, + ) + cs.SubscribeHeadChanges(rnf) + } msgIndex.workers.Add(1) go msgIndex.background(ctx) @@ -431,12 +434,12 @@ func (x *msgIndex) doApply(ctx context.Context, tx *sql.Tx, ts *types.TipSet) er } // interface -func (x *msgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) { +func (x *msgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (*chainindex.MsgInfo, error) { x.closeLk.RLock() defer x.closeLk.RUnlock() if x.closed { - return MsgInfo{}, ErrClosed + return nil, ErrClosed } var ( @@ -449,18 +452,18 @@ func (x *msgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) { err := row.Scan(&tipset, &epoch) switch { case err == sql.ErrNoRows: - return MsgInfo{}, ErrNotFound + return nil, ErrNotFound case err != nil: - return MsgInfo{}, xerrors.Errorf("error querying msgindex database: %w", err) + return nil, xerrors.Errorf("error querying msgindex database: %w", err) } tipsetCid, err := cid.Decode(tipset) if err != nil { - return MsgInfo{}, xerrors.Errorf("error decoding tipset cid: %w", err) + return nil, xerrors.Errorf("error decoding tipset cid: %w", err) } - return MsgInfo{ + return &chainindex.MsgInfo{ Message: m, TipSet: tipsetCid, Epoch: abi.ChainEpoch(epoch), diff --git a/chain/index/msgindex_test.go b/chain/index/msgindex_test.go index 2cf707b0fed..add888e2eb2 100644 --- a/chain/index/msgindex_test.go +++ b/chain/index/msgindex_test.go @@ -30,7 +30,7 @@ func TestBasicMsgIndex(t *testing.T) { tmp := t.TempDir() t.Cleanup(func() { _ = os.RemoveAll(tmp) }) - msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) + msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs, true) require.NoError(t, err) defer msgIndex.Close() //nolint @@ -58,7 +58,7 @@ func TestReorgMsgIndex(t *testing.T) { tmp := 
t.TempDir() t.Cleanup(func() { _ = os.RemoveAll(tmp) }) - msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) + msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs, true) require.NoError(t, err) defer msgIndex.Close() //nolint @@ -103,7 +103,7 @@ func TestReconcileMsgIndex(t *testing.T) { tmp := t.TempDir() t.Cleanup(func() { _ = os.RemoveAll(tmp) }) - msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) + msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs, true) require.NoError(t, err) for i := 0; i < 10; i++ { @@ -130,7 +130,7 @@ func TestReconcileMsgIndex(t *testing.T) { require.NoError(t, err) // reopen to reconcile - msgIndex, err = NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) + msgIndex, err = NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs, true) require.NoError(t, err) defer msgIndex.Close() //nolint diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index 8c022755371..8f5b666ea15 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -41,6 +41,7 @@ import ( . "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" ) @@ -169,7 +170,7 @@ func TestForkHeightTriggers(t *testing.T) { } return st.Flush(ctx) - }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex) + }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { t.Fatal(err) } @@ -287,7 +288,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) { root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { migrationCount++ return root, nil - }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex) + }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { t.Fatal(err) } @@ -519,7 +520,7 @@ func TestForkPreMigration(t *testing.T) { return nil }, }}}, - }, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex) + }, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { t.Fatal(err) } @@ -595,6 +596,7 @@ func TestDisablePreMigration(t *testing.T) { cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex, + chainindex.DummyIndexer, ) require.NoError(t, err) require.NoError(t, sm.Start(context.Background())) @@ -650,6 +652,7 @@ func TestMigrtionCache(t *testing.T) { cg.BeaconSchedule(), metadataDs, index.DummyMsgIndex, + chainindex.DummyIndexer, ) require.NoError(t, err) require.NoError(t, sm.Start(context.Background())) @@ -703,6 +706,7 @@ func TestMigrtionCache(t *testing.T) { cg.BeaconSchedule(), metadataDs, index.DummyMsgIndex, + chainindex.DummyIndexer, ) require.NoError(t, err) sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) { diff --git a/chain/stmgr/searchwait.go b/chain/stmgr/searchwait.go index 3377389b9b6..e0db8adb11f 100644 --- a/chain/stmgr/searchwait.go +++ b/chain/stmgr/searchwait.go @@ -190,9 +190,13 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet } func (sm *StateManager) searchForIndexedMsg(ctx context.Context, mcid cid.Cid, m types.ChainMsg) (*types.TipSet, *types.MessageReceipt, cid.Cid, 
error) { - minfo, err := sm.msgIndex.GetMsgInfo(ctx, mcid) + minfo, err := sm.chainIndexer.GetMsgInfo(ctx, mcid) if err != nil { - return nil, nil, cid.Undef, xerrors.Errorf("error looking up message in index: %w", err) + // If chainIndexer fails, fallback to msgIndex + minfo, err = sm.msgIndex.GetMsgInfo(ctx, mcid) + if err != nil { + return nil, nil, cid.Undef, xerrors.Errorf("error looking up message in indexes: %w", err) + } } // check the height against the current tipset; minimum execution confidence requires that the diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index 2e29dc8e746..2abc943bc1b 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -34,6 +34,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" // Used for genesis. msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" @@ -156,7 +157,8 @@ type StateManager struct { tsExecMonitor ExecMonitor beacon beacon.Schedule - msgIndex index.MsgIndex + msgIndex index.MsgIndex + chainIndexer chainindex.Indexer // We keep a small cache for calls to ExecutionTrace which helps improve // performance for node operators like exchanges and block explorers @@ -177,7 +179,8 @@ type tipSetCacheEntry struct { invocTrace []*api.InvocResult } -func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, metadataDs dstore.Batching, msgIndex index.MsgIndex) (*StateManager, error) { +func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, + metadataDs dstore.Batching, msgIndex index.MsgIndex, chainIndexer chainindex.Indexer) (*StateManager, error) { // If we have upgrades, make sure they're in-order and make sense. 
if err := us.Validate(); err != nil { return nil, err @@ -243,12 +246,13 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, }, compWait: make(map[string]chan struct{}), msgIndex: msgIndex, + chainIndexer: chainIndexer, execTraceCache: execTraceCache, }, nil } -func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching, msgIndex index.MsgIndex) (*StateManager, error) { - sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs, msgIndex) +func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching, msgIndex index.MsgIndex, chainIndexer chainindex.Indexer) (*StateManager, error) { + sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs, msgIndex, chainIndexer) if err != nil { return nil, err } diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 1ecfc474a02..c8458c61f18 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -23,6 +23,7 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/node/repo" ) @@ -216,7 +217,8 @@ func TestChainExportImportFull(t *testing.T) { t.Fatal("imported chain differed from exported chain") } - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), + ds, index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { t.Fatal(err) } diff --git a/chainindex/ddls.go b/chainindex/ddls.go new file mode 100644 index 00000000000..798eb02fe7e --- /dev/null +++ b/chainindex/ddls.go @@ -0,0 +1,47 @@ +package chainindex + +const DefaultDbFilename = "chainindex.db" + +const ( + stmtGetNonRevertedMessageInfo = "SELECT tipset_key_cid, height FROM tipset_message WHERE message_cid = ? AND reverted = 0" + stmtGetMsgCidFromEthHash = "SELECT message_cid FROM eth_tx_hash WHERE tx_hash = ?" + stmtInsertEthTxHash = "INSERT INTO eth_tx_hash (tx_hash, message_cid) VALUES (?, ?) ON CONFLICT (tx_hash) DO UPDATE SET inserted_at = CURRENT_TIMESTAMP" + + stmtInsertTipsetMessage = "INSERT INTO tipset_message (tipset_key_cid, height, reverted, message_cid, message_index) VALUES (?, ?, ?, ?, ?) ON CONFLICT (tipset_key_cid, message_cid) DO UPDATE SET reverted = 0" + + stmtTipsetExists = "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ?)" + stmtTipsetUnRevert = "UPDATE tipset_message SET reverted = 0 WHERE tipset_key_cid = ?" + + stmtRevertTipset = "UPDATE tipset_message SET reverted = 1 WHERE tipset_key_cid = ?" + + stmtGetMaxNonRevertedTipset = "SELECT tipset_key_cid FROM tipset_message WHERE reverted = 0 ORDER BY height DESC LIMIT 1" + + stmtRemoveRevertedTipsetsBeforeHeight = "DELETE FROM tipset_message WHERE height < ? AND reverted = 1" + stmtRemoveTipsetsBeforeHeight = "DELETE FROM tipset_message WHERE height < ?" 
+
+	stmtDeleteEthHashesOlderThan = `DELETE FROM eth_tx_hash WHERE inserted_at < datetime('now', ?);`
+)
+
+var ddls = []string{
+	`CREATE TABLE IF NOT EXISTS tipset_message (
+		message_id INTEGER PRIMARY KEY,
+		tipset_key_cid BLOB NOT NULL,
+		height INTEGER NOT NULL,
+		reverted INTEGER NOT NULL,
+		message_cid BLOB NOT NULL,
+		message_index INTEGER NOT NULL,
+		UNIQUE (tipset_key_cid, message_cid)
+	)`,
+
+	`CREATE TABLE IF NOT EXISTS eth_tx_hash (
+		tx_hash TEXT PRIMARY KEY,
+		message_cid BLOB NOT NULL,
+		inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+	)`,
+
+	`CREATE INDEX IF NOT EXISTS insertion_time_index ON eth_tx_hash (inserted_at)`,
+
+	`CREATE INDEX IF NOT EXISTS idx_message_cid ON tipset_message (message_cid)`,
+
+	`CREATE INDEX IF NOT EXISTS idx_tipset_key_cid ON tipset_message (tipset_key_cid)`,
+}
diff --git a/chainindex/gc.go b/chainindex/gc.go
new file mode 100644
index 00000000000..4aea573c6fb
--- /dev/null
+++ b/chainindex/gc.go
@@ -0,0 +1,113 @@
+package chainindex
+
+import (
+	"context"
+	"strconv"
+	"time"
+
+	logging "github.com/ipfs/go-log/v2"
+
+	"github.com/filecoin-project/lotus/chain/actors/policy"
+)
+
+var (
+	log             = logging.Logger("chainindex")
+	cleanupInterval = time.Duration(8) * time.Hour
+)
+
+func (si *SqliteIndexer) gcLoop() {
+	defer si.wg.Done()
+
+	// Initial cleanup before entering the loop
+	si.cleanupRevertedTipsets(si.ctx)
+	si.gc(si.ctx)
+
+	cleanupTicker := time.NewTicker(cleanupInterval)
+	defer cleanupTicker.Stop()
+
+	for {
+		select {
+		case <-cleanupTicker.C:
+			si.cleanupRevertedTipsets(si.ctx)
+			si.gc(si.ctx)
+		case <-si.ctx.Done():
+			return
+		}
+	}
+}
+
+func (si *SqliteIndexer) gc(ctx context.Context) {
+	if si.gcRetentionEpochs <= 0 {
+		return
+	}
+
+	head := si.cs.GetHeaviestTipSet().Height()
+	removeEpoch := int64(head) - si.gcRetentionEpochs
+
+	if removeEpoch <= 0 {
+		return
+	}
+
+	res, err := si.removeTipsetsBeforeHeightStmt.ExecContext(ctx, removeEpoch)
+	if err != nil {
+		log.Errorw("failed to remove tipsets before height", "height", removeEpoch, "error", err)
+		return
+	}
+
+	rows, err := res.RowsAffected()
+	if err != nil {
+		log.Errorw("failed to get rows affected", "error", err)
+		return
+	}
+
+	log.Infow("gc'd tipsets", "height", removeEpoch, "nRows", rows)
+
+	// Also GC eth hashes
+
+	// Convert the epoch-based retention period into days (one epoch is 30 seconds)
+	days := int((si.gcRetentionEpochs * 30) / (24 * 60 * 60))
+
+	// Ensure we have at least 1 day for GC
+	if days < 1 {
+		return
+	}
+
+	res, err = si.deleteEthHashesOlderThanStmt.Exec("-" + strconv.Itoa(days) + " day")
+	if err != nil {
+		log.Errorw("failed to delete eth hashes older than retention period", "error", err)
+		return
+	}
+
+	rows, err = res.RowsAffected()
+	if err != nil {
+		log.Errorw("failed to get rows affected", "error", err)
+		return
+	}
+
+	log.Infow("gc'd eth hashes", "height", removeEpoch, "nRows", rows)
+}
+
+func (si *SqliteIndexer) cleanupRevertedTipsets(ctx context.Context) {
+	head := si.cs.GetHeaviestTipSet().Height()
+
+	finalEpoch := (head - policy.ChainFinality) - 10 // 10 is for some grace period
+	if finalEpoch <= 0 {
+		return
+	}
+
+	// remove all entries from the `tipsets` table where `reverted=true` and height is < finalEpoch
+	// cascade delete based on foreign key constraints takes care of cleaning up the other tables
+	res, err := si.removeRevertedTipsetsBeforeHeightStmt.ExecContext(ctx, finalEpoch)
+	if err != nil {
+		log.Errorw("failed to remove reverted tipsets before height", "height", finalEpoch, "error", err)
+		return
+	}
+
+	rows, err := res.RowsAffected()
+	if err != nil {
+		log.Errorw("failed to get rows affected", "error", err)
+		return
+	}
+
+	log.Infow("removed reverted tipsets", "height", finalEpoch, "nRows", rows)
+}
diff --git a/chainindex/helpers.go b/chainindex/helpers.go
new file mode 100644
index 00000000000..d4b37cb08bd
--- /dev/null
+++ b/chainindex/helpers.go
@@ -0,0 +1,108 @@
+package chainindex
+
+import (
+	"context"
+	"database/sql"
+	"os"
+
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/chain/types"
+)
+
+func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error {
+	// if a database already exists, we try to delete it and create a new one
+	if _, err := os.Stat(path); err == nil {
+		if err = os.Remove(path); err != nil {
+			return xerrors.Errorf("chain index already exists at %s and can't be deleted: %w", path, err)
+		}
+	}
+
+	si, err := NewSqliteIndexer(path, cs, 0)
+	if err != nil {
+		return xerrors.Errorf("failed to create sqlite indexer: %w", err)
+	}
+	defer func() {
+		_ = si.Close()
+	}()
+
+	err = withTx(ctx, si.db, func(tx *sql.Tx) error {
+		curTs := cs.GetHeaviestTipSet()
+		startHeight := curTs.Height()
+
+		for curTs != nil {
+			if err := si.indexTipset(ctx, tx, curTs); err != nil {
+				log.Infof("stopping import after %d tipsets", startHeight-curTs.Height())
+				break
+			}
+
+			curTs, err = cs.GetTipSetFromKey(ctx, curTs.Parents())
+			if err != nil {
+				log.Infof("stopping import: failed to load parent tipset: %s", err)
+				break
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		return xerrors.Errorf("failed to populate from snapshot: %w", err)
+	}
+
+	return nil
+}
+
+func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, indexer Indexer) {
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case u := <-ch:
+			if u.Type != api.MpoolAdd {
+				continue
+			}
+			if u.Message == nil {
+				continue
+			}
+			err := indexer.IndexSignedMessage(ctx, u.Message)
+			if err != nil {
+				log.Errorw("error indexing signed Mpool message", "error", err)
+			}
+		}
+	}
+}
+
+// toTipsetKeyCidBytes serializes a tipset key into the bytes of its CID
+func toTipsetKeyCidBytes(ts *types.TipSet) ([]byte, error) {
+	tsKeyCid, err := ts.Key().Cid()
+	if err != nil {
+		return nil, xerrors.Errorf("error getting tipset key cid: %w", err)
+	}
+	return tsKeyCid.Bytes(), nil
+}
+
+func withTx(ctx context.Context, db *sql.DB, fn func(*sql.Tx) error) (err error) {
+	var tx *sql.Tx
+	tx, err = db.BeginTx(ctx, nil)
+	if err != nil {
+		return xerrors.Errorf("failed to begin transaction: %w", err)
+	}
+
+	defer func() {
+		if p := recover(); p != nil {
+			// A panic occurred, rollback and repanic
+			_ = tx.Rollback()
+			panic(p)
+		} else if err != nil {
+			// Something went wrong, rollback
+			_ = tx.Rollback()
+		} else {
+			// All good, commit
+			err = tx.Commit()
+		}
+	}()
+
+	err = fn(tx)
+	return
+}
diff --git a/chainindex/indexer.go b/chainindex/indexer.go
new file mode 100644
index 00000000000..8f8666a229f
--- /dev/null
+++ b/chainindex/indexer.go
@@ -0,0 +1,300 @@
+package chainindex
+
+import (
+	"context"
+	"database/sql"
+	"sync"
+
+	"github.com/ipfs/go-cid"
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/go-state-types/crypto"
+
+	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/chain/types/ethtypes"
+	"github.com/filecoin-project/lotus/lib/sqlite"
+)
+
+var _ Indexer = (*SqliteIndexer)(nil)
+
+type SqliteIndexer struct {
+	ctx    context.Context
+	cancel context.CancelFunc
+	wg     sync.WaitGroup
+
+	db *sql.DB
+	cs ChainStore
+
+	insertEthTxHashStmt       *sql.Stmt
+	getNonRevertedMsgInfoStmt *sql.Stmt
+
getMsgCidFromEthHashStmt *sql.Stmt + insertTipsetMessageStmt *sql.Stmt + revertTipsetStmt *sql.Stmt + getMaxNonRevertedTipsetStmt *sql.Stmt + tipsetExistsStmt *sql.Stmt + tipsetUnRevertStmt *sql.Stmt + removeRevertedTipsetsBeforeHeightStmt *sql.Stmt + removeTipsetsBeforeHeightStmt *sql.Stmt + deleteEthHashesOlderThanStmt *sql.Stmt + + gcRetentionEpochs int64 + + mu sync.Mutex + updateSubs map[uint64]*updateSub + subIdCounter uint64 +} + +func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64) (si *SqliteIndexer, err error) { + db, _, err := sqlite.Open(path) + if err != nil { + return nil, xerrors.Errorf("failed to setup message index db: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + + defer func() { + if err != nil { + cancel() + _ = db.Close() + } + }() + + err = sqlite.InitDb(ctx, "chain index", db, ddls, []sqlite.MigrationFunc{}) + if err != nil { + return nil, xerrors.Errorf("failed to init message index db: %w", err) + } + + si = &SqliteIndexer{ + ctx: ctx, + cancel: cancel, + db: db, + cs: cs, + updateSubs: make(map[uint64]*updateSub), + subIdCounter: 0, + gcRetentionEpochs: gcRetentionEpochs, + } + if err = si.prepareStatements(); err != nil { + return nil, xerrors.Errorf("failed to prepare statements: %w", err) + } + + si.wg.Add(1) + go si.gcLoop() + + return si, nil +} + +func (si *SqliteIndexer) Close() error { + if si.db == nil { + return nil + } + si.cancel() + si.wg.Wait() + + if err := si.db.Close(); err != nil { + return xerrors.Errorf("failed to close db: %w", err) + } + return nil +} + +func (si *SqliteIndexer) prepareStatements() error { + var err error + + si.insertEthTxHashStmt, err = si.db.Prepare(stmtInsertEthTxHash) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "insertEthTxHashStmt", err) + } + si.getNonRevertedMsgInfoStmt, err = si.db.Prepare(stmtGetNonRevertedMessageInfo) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "getNonRevertedMsgInfoStmt", err) + } + si.getMsgCidFromEthHashStmt, err = si.db.Prepare(stmtGetMsgCidFromEthHash) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "getMsgCidFromEthHashStmt", err) + } + si.insertTipsetMessageStmt, err = si.db.Prepare(stmtInsertTipsetMessage) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "insertTipsetMessageStmt", err) + } + si.tipsetExistsStmt, err = si.db.Prepare(stmtTipsetExists) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "tipsetExistsStmt", err) + } + si.tipsetUnRevertStmt, err = si.db.Prepare(stmtTipsetUnRevert) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "tipsetUnRevertStmt", err) + } + si.revertTipsetStmt, err = si.db.Prepare(stmtRevertTipset) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "revertTipsetStmt", err) + } + si.getMaxNonRevertedTipsetStmt, err = si.db.Prepare(stmtGetMaxNonRevertedTipset) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "getMaxNonRevertedTipsetStmt", err) + } + si.removeRevertedTipsetsBeforeHeightStmt, err = si.db.Prepare(stmtRemoveRevertedTipsetsBeforeHeight) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "removeRevertedTipsetsBeforeHeightStmt", err) + } + si.removeTipsetsBeforeHeightStmt, err = si.db.Prepare(stmtRemoveTipsetsBeforeHeight) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "removeTipsetsBeforeHeightStmt", err) + } + si.deleteEthHashesOlderThanStmt, err = si.db.Prepare(stmtDeleteEthHashesOlderThan) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "deleteEthHashesOlderThanStmt", 
err) + } + + return nil +} + +func (si *SqliteIndexer) IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, msgCid cid.Cid) error { + return withTx(ctx, si.db, func(tx *sql.Tx) error { + return si.indexEthTxHash(ctx, tx, txHash, msgCid) + }) +} + +func (si *SqliteIndexer) IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error { + return withTx(ctx, si.db, func(tx *sql.Tx) error { + return si.indexSignedMessage(ctx, tx, msg) + }) +} + +func (si *SqliteIndexer) indexSignedMessage(ctx context.Context, tx *sql.Tx, msg *types.SignedMessage) error { + if msg.Signature.Type != crypto.SigTypeDelegated { + return nil + } + + ethTx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(msg) + if err != nil { + return xerrors.Errorf("error converting filecoin message to eth tx: %w", err) + } + + txHash, err := ethTx.TxHash() + if err != nil { + return xerrors.Errorf("error hashing transaction: %w", err) + } + + return si.indexEthTxHash(ctx, tx, txHash, msg.Cid()) +} + +func (si *SqliteIndexer) indexEthTxHash(ctx context.Context, tx *sql.Tx, txHash ethtypes.EthHash, msgCid cid.Cid) error { + insertEthTxHashStmt := tx.Stmt(si.insertEthTxHashStmt) + _, err := insertEthTxHashStmt.ExecContext(ctx, txHash.String(), msgCid.Bytes()) + if err != nil { + return xerrors.Errorf("failed to index eth tx hash: %w", err) + } + + return nil +} + +func (si *SqliteIndexer) Apply(ctx context.Context, from, to *types.TipSet) error { + // We're moving the chain ahead from the `from` tipset to the `to` tipset + // Height(to) > Height(from) + err := withTx(ctx, si.db, func(tx *sql.Tx) error { + // index the `to` tipset first as we only need to index the tipsets and messages for it + if err := si.indexTipset(ctx, tx, to); err != nil { + return xerrors.Errorf("error indexing tipset: %w", err) + } + + // index the `from` tipset just in case it's not indexed + if err := si.indexTipset(ctx, tx, from); err != nil { + return xerrors.Errorf("error indexing tipset: %w", err) + } + + return nil + }) + + if err != nil { + return xerrors.Errorf("error applying tipset: %w", err) + } + + si.notifyUpdateSubs() + + return nil +} + +func (si *SqliteIndexer) Revert(ctx context.Context, from, to *types.TipSet) error { + // We're reverting the chain from the tipset at `from` to the tipset at `to`. 
+ // Height(to) < Height(from) + + revertTsKeyCid, err := toTipsetKeyCidBytes(from) + if err != nil { + return xerrors.Errorf("error getting tipset key cid: %w", err) + } + + err = withTx(ctx, si.db, func(tx *sql.Tx) error { + if _, err := tx.Stmt(si.revertTipsetStmt).ExecContext(ctx, revertTsKeyCid); err != nil { + return xerrors.Errorf("error marking tipset as reverted: %w", err) + } + + // index the `to` tipset as it has now been applied -> simply for redundancy + if err := si.indexTipset(ctx, tx, to); err != nil { + return xerrors.Errorf("error indexing tipset: %w", err) + } + + return nil + }) + if err != nil { + return xerrors.Errorf("error during revert transaction: %w", err) + } + + si.notifyUpdateSubs() + + return nil +} + +func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error { + tsKeyCidBytes, err := toTipsetKeyCidBytes(ts) + if err != nil { + return xerrors.Errorf("error computing tipset cid: %w", err) + } + + restored, err := si.restoreTipsetIfExists(ctx, tx, tsKeyCidBytes) + if err != nil { + return xerrors.Errorf("error restoring tipset: %w", err) + } + if restored { + return nil + } + + height := ts.Height() + insertTipsetMsgStmt := tx.Stmt(si.insertTipsetMessageStmt) + + msgs, err := si.cs.MessagesForTipset(ctx, ts) + if err != nil { + return xerrors.Errorf("error getting messages for tipset: %w", err) + } + + for i, msg := range msgs { + msg := msg + if _, err := insertTipsetMsgStmt.ExecContext(ctx, tsKeyCidBytes, height, 0, msg.Cid().Bytes(), i); err != nil { + return xerrors.Errorf("error inserting tipset message: %w", err) + } + + smsg, ok := msg.(*types.SignedMessage) + if !ok { + continue + } + + if err := si.indexSignedMessage(ctx, tx, smsg); err != nil { + return xerrors.Errorf("error indexing eth tx hash: %w", err) + } + } + + return nil +} + +func (si *SqliteIndexer) restoreTipsetIfExists(ctx context.Context, tx *sql.Tx, tsKeyCidBytes []byte) (bool, error) { + // Check if the tipset already exists + var exists bool + if err := tx.Stmt(si.tipsetExistsStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { + return false, xerrors.Errorf("error checking if tipset exists: %w", err) + } + if exists { + if _, err := tx.Stmt(si.tipsetUnRevertStmt).ExecContext(ctx, tsKeyCidBytes); err != nil { + return false, xerrors.Errorf("error restoring tipset: %w", err) + } + return true, nil + } + return false, nil +} diff --git a/chainindex/interface.go b/chainindex/interface.go new file mode 100644 index 00000000000..5534f7a5526 --- /dev/null +++ b/chainindex/interface.go @@ -0,0 +1,81 @@ +package chainindex + +import ( + "context" + "errors" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/ethtypes" +) + +var ErrNotFound = errors.New("message not found") + +// MsgInfo is the Message metadata the index tracks. 
+type MsgInfo struct { + // the message this record refers to + Message cid.Cid + // the tipset where this message was included + TipSet cid.Cid + // the epoch where this message was included + Epoch abi.ChainEpoch +} + +type Indexer interface { + IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error + IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, c cid.Cid) error + + Apply(ctx context.Context, from, to *types.TipSet) error + Revert(ctx context.Context, from, to *types.TipSet) error + + // Returns (cid.Undef, nil) if the message was not found + GetCidFromHash(ctx context.Context, hash ethtypes.EthHash) (cid.Cid, error) + // Returns (nil, ErrNotFound) if the message was not found + GetMsgInfo(ctx context.Context, m cid.Cid) (*MsgInfo, error) + Close() error +} + +type ChainStore interface { + MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) + GetHeaviestTipSet() *types.TipSet + GetTipSetByCid(ctx context.Context, tsKeyCid cid.Cid) (*types.TipSet, error) + GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) +} + +var _ ChainStore = (*store.ChainStore)(nil) + +type dummyIndexer struct{} + +func (dummyIndexer) Close() error { + return nil +} + +func (dummyIndexer) IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error { + return nil +} + +func (dummyIndexer) IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, c cid.Cid) error { + return nil +} + +func (dummyIndexer) GetCidFromHash(ctx context.Context, hash ethtypes.EthHash) (cid.Cid, error) { + return cid.Undef, ErrNotFound +} + +func (dummyIndexer) GetMsgInfo(ctx context.Context, m cid.Cid) (*MsgInfo, error) { + return nil, ErrNotFound +} + +func (dummyIndexer) Apply(ctx context.Context, from, to *types.TipSet) error { + return nil +} + +func (dummyIndexer) Revert(ctx context.Context, from, to *types.TipSet) error { + return nil +} + +var DummyIndexer Indexer = dummyIndexer{} diff --git a/chainindex/pub_sub.go b/chainindex/pub_sub.go new file mode 100644 index 00000000000..15cb6a35858 --- /dev/null +++ b/chainindex/pub_sub.go @@ -0,0 +1,59 @@ +package chainindex + +import "context" + +type updateSub struct { + ctx context.Context + cancel context.CancelFunc + + ch chan chainIndexUpdated +} + +type chainIndexUpdated struct{} + +func (si *SqliteIndexer) subscribeUpdates() (chan chainIndexUpdated, func()) { + subCtx, subCancel := context.WithCancel(context.Background()) + ch := make(chan chainIndexUpdated) + + si.mu.Lock() + subId := si.subIdCounter + si.subIdCounter++ + si.updateSubs[subId] = &updateSub{ + ctx: subCtx, + cancel: subCancel, + ch: ch, + } + si.mu.Unlock() + + unSubscribeF := func() { + si.mu.Lock() + if sub, ok := si.updateSubs[subId]; ok { + sub.cancel() + delete(si.updateSubs, subId) + } + si.mu.Unlock() + } + + return ch, unSubscribeF +} + +func (si *SqliteIndexer) notifyUpdateSubs() { + si.mu.Lock() + tSubs := make([]*updateSub, 0, len(si.updateSubs)) + for _, tSub := range si.updateSubs { + tSub := tSub + tSubs = append(tSubs, tSub) + } + si.mu.Unlock() + + for _, tSub := range tSubs { + tSub := tSub + select { + case tSub.ch <- chainIndexUpdated{}: + case <-tSub.ctx.Done(): + // subscription was cancelled, ignore + case <-si.ctx.Done(): + return + } + } +} diff --git a/chainindex/read.go b/chainindex/read.go new file mode 100644 index 00000000000..c5ec093ee61 --- /dev/null +++ b/chainindex/read.go @@ -0,0 +1,155 @@ +package chainindex + +import ( + "context" + "database/sql" + "fmt" + "time" + + 
"github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/ethtypes" +) + +var ( + headIndexedWaitTimeout = 5 * time.Second +) + +func (si *SqliteIndexer) GetMaxNonRevertedTipset(ctx context.Context) (*types.TipSet, error) { + var tipsetKeyCidBytes []byte + err := si.getMaxNonRevertedTipsetStmt.QueryRowContext(ctx).Scan(&tipsetKeyCidBytes) + if err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, xerrors.Errorf("failed to get max non reverted tipset: %w", err) + } + + tipsetKeyCid, err := cid.Cast(tipsetKeyCidBytes) + if err != nil { + return nil, xerrors.Errorf("failed to cast tipset key cid: %w", err) + } + + // Can this error out for reverted tipsets ? + return si.cs.GetTipSetByCid(ctx, tipsetKeyCid) +} + +func (si *SqliteIndexer) GetCidFromHash(ctx context.Context, txHash ethtypes.EthHash) (cid.Cid, error) { + var msgCidBytes []byte + + err := si.queryMsgCidFromEthHash(ctx, txHash, &msgCidBytes) + if err == sql.ErrNoRows { + err = si.waitTillHeadIndexedAndApply(ctx, func() error { + return si.queryMsgCidFromEthHash(ctx, txHash, &msgCidBytes) + }) + } + + if err != nil { + if err == sql.ErrNoRows { + return cid.Undef, ErrNotFound + } + return cid.Undef, xerrors.Errorf("failed to get message CID from eth hash: %w", err) + } + + msgCid, err := cid.Cast(msgCidBytes) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to cast message CID: %w", err) + } + + fmt.Println("RETURNING CORRECT MSG CID") + + return msgCid, nil +} + +func (si *SqliteIndexer) queryMsgCidFromEthHash(ctx context.Context, txHash ethtypes.EthHash, msgCidBytes *[]byte) error { + return si.getMsgCidFromEthHashStmt.QueryRowContext(ctx, txHash.String()).Scan(msgCidBytes) +} + +func (si *SqliteIndexer) GetMsgInfo(ctx context.Context, messageCid cid.Cid) (*MsgInfo, error) { + var tipsetKeyCidBytes []byte + var height int64 + + fmt.Println("GETTING MSG INFO") + + err := si.queryMsgInfo(ctx, messageCid, &tipsetKeyCidBytes, &height) + if err == sql.ErrNoRows { + err = si.waitTillHeadIndexedAndApply(ctx, func() error { + return si.queryMsgInfo(ctx, messageCid, &tipsetKeyCidBytes, &height) + }) + } + + if err != nil { + if err == sql.ErrNoRows { + fmt.Println("GETTING MSG INFO") + return nil, ErrNotFound + } + return nil, xerrors.Errorf("failed to get message info: %w", err) + } + + tipsetKey, err := cid.Cast(tipsetKeyCidBytes) + if err != nil { + return nil, xerrors.Errorf("failed to cast tipset key cid: %w", err) + } + + return &MsgInfo{ + Message: messageCid, + TipSet: tipsetKey, + Epoch: abi.ChainEpoch(height), + }, nil +} + +func (si *SqliteIndexer) queryMsgInfo(ctx context.Context, messageCid cid.Cid, tipsetKeyCidBytes *[]byte, height *int64) error { + return si.getNonRevertedMsgInfoStmt.QueryRowContext(ctx, messageCid.Bytes()).Scan(tipsetKeyCidBytes, height) +} + +func (si *SqliteIndexer) isTipsetIndexed(ctx context.Context, tsKeyCid []byte) (bool, error) { + var exists bool + err := si.tipsetExistsStmt.QueryRowContext(ctx, tsKeyCid).Scan(&exists) + if err != nil { + return false, xerrors.Errorf("error checking if tipset exists: %w", err) + } + return exists, nil +} + +func (si *SqliteIndexer) waitTillHeadIndexedAndApply(ctx context.Context, applyFn func() error) error { + ctx, cancel := context.WithTimeout(ctx, headIndexedWaitTimeout) + defer cancel() + + head := si.cs.GetHeaviestTipSet() + headTsKeyCidBytes, err := toTipsetKeyCidBytes(head) + if 
err != nil { + return xerrors.Errorf("error getting tipset key cid: %w", err) + } + + // is it already indexed? + if exists, err := si.isTipsetIndexed(ctx, headTsKeyCidBytes); err != nil { + return xerrors.Errorf("error checking if tipset exists: %w", err) + } else if exists { + return applyFn() + } + + // wait till it is indexed + subCh, unsubFn := si.subscribeUpdates() + defer unsubFn() + + for { + exists, err := si.isTipsetIndexed(ctx, headTsKeyCidBytes) + if err != nil { + return xerrors.Errorf("error checking if tipset exists: %w", err) + } + if exists { + return applyFn() + } + + select { + case <-subCh: + // Continue to next iteration to check again + case <-ctx.Done(): + return ctx.Err() + } + } +} diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 500ef4af3ed..6106bfe2002 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -42,6 +42,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/delegated" @@ -229,7 +230,8 @@ var importBenchCmd = &cli.Command{ defer cs.Close() //nolint:errcheck // TODO: We need to supply the actual beacon after v14 - stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, metadataDs, index.DummyMsgIndex) + stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), + filcns.DefaultUpgradeSchedule(), nil, metadataDs, index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { return err } diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go index 9be42914705..f1086b6fa51 100644 --- a/cmd/lotus-shed/balances.go +++ b/cmd/lotus-shed/balances.go @@ -42,6 +42,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/node/repo" ) @@ -514,7 +515,8 @@ var chainBalanceStateCmd = &cli.Command{ cst := cbor.NewCborStore(bs) store := adt.WrapStore(ctx, cst) - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, + index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { return err } @@ -738,7 +740,8 @@ var chainPledgeCmd = &cli.Command{ cst := cbor.NewCborStore(bs) store := adt.WrapStore(ctx, cst) - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), + vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { return err } diff --git a/cmd/lotus-shed/gas-estimation.go b/cmd/lotus-shed/gas-estimation.go index 85573ddb9a0..745fd0e8f65 100644 --- 
a/cmd/lotus-shed/gas-estimation.go +++ b/cmd/lotus-shed/gas-estimation.go @@ -25,6 +25,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/node/repo" ) @@ -107,7 +108,8 @@ var gasTraceCmd = &cli.Command{ cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) defer cs.Close() //nolint:errcheck - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), + shd, mds, index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { return err } @@ -203,7 +205,8 @@ var replayOfflineCmd = &cli.Command{ cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) defer cs.Close() //nolint:errcheck - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), + shd, mds, index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { return err } diff --git a/cmd/lotus-shed/invariants.go b/cmd/lotus-shed/invariants.go index 378f6af5843..3cf937895e6 100644 --- a/cmd/lotus-shed/invariants.go +++ b/cmd/lotus-shed/invariants.go @@ -35,6 +35,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/node/repo" ) @@ -127,7 +128,8 @@ var invariantsCmd = &cli.Command{ cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) defer cs.Close() //nolint:errcheck - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), + vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { return err } diff --git a/cmd/lotus-shed/migrations.go b/cmd/lotus-shed/migrations.go index 24110860404..b79203cc1e2 100644 --- a/cmd/lotus-shed/migrations.go +++ b/cmd/lotus-shed/migrations.go @@ -68,6 +68,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/must" "github.com/filecoin-project/lotus/node/repo" @@ -177,7 +178,8 @@ var migrationsCmd = &cli.Command{ defer cs.Close() //nolint:errcheck // Note: we use a map datastore for the metadata to avoid writing / using cached migration results in the metadata store - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, 
datastore.NewMapDatastore(), index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, + datastore.NewMapDatastore(), index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { return err } diff --git a/cmd/lotus-shed/state-stats.go b/cmd/lotus-shed/state-stats.go index cf865f20194..c4f0d0a291d 100644 --- a/cmd/lotus-shed/state-stats.go +++ b/cmd/lotus-shed/state-stats.go @@ -39,6 +39,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/node/repo" ) @@ -259,7 +260,8 @@ func loadChainStore(ctx context.Context, repoPath string) (*StoreHandle, error) } tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc) - sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, + index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { return nil, fmt.Errorf("failed to open state manager: %w", err) } diff --git a/cmd/lotus-sim/simulation/node.go b/cmd/lotus-sim/simulation/node.go index fd9c0284614..06758328201 100644 --- a/cmd/lotus-sim/simulation/node.go +++ b/cmd/lotus-sim/simulation/node.go @@ -17,6 +17,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock" "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/stages" "github.com/filecoin-project/lotus/node/repo" @@ -107,7 +108,8 @@ func (nd *Node) LoadSim(ctx context.Context, name string) (*Simulation, error) { if err != nil { return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err) } - sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), us, nil, nd.MetadataDS, index.DummyMsgIndex) + sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), us, + nil, nd.MetadataDS, index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { return nil, xerrors.Errorf("failed to create state manager for simulation %s: %w", name, err) } @@ -126,7 +128,8 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) if err != nil { return nil, err } - sm, err := stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil, nd.MetadataDS, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), + vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil, nd.MetadataDS, index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { return nil, xerrors.Errorf("creating state manager: %w", err) } diff --git a/cmd/lotus-sim/simulation/simulation.go b/cmd/lotus-sim/simulation/simulation.go index d73a033cf96..5c800b13884 100644 --- a/cmd/lotus-sim/simulation/simulation.go +++ 
b/cmd/lotus-sim/simulation/simulation.go @@ -21,6 +21,7 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock" "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/stages" ) @@ -202,7 +203,9 @@ func (sim *Simulation) SetUpgradeHeight(nv network.Version, epoch abi.ChainEpoch if err != nil { return err } - sm, err := stmgr.NewStateManager(sim.Node.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil, sim.Node.MetadataDS, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(sim.Node.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), + vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil, sim.Node.MetadataDS, index.DummyMsgIndex, + chainindex.DummyIndexer) if err != nil { return err } diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index e13557a943a..091a59fe1b2 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -42,6 +42,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/journal/fsjournal" @@ -50,7 +51,6 @@ import ( "github.com/filecoin-project/lotus/lib/ulimit" "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/testing" @@ -612,7 +612,8 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return xerrors.Errorf("failed to construct beacon schedule: %w", err) } - stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex) + stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), + vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { return err } @@ -628,27 +629,18 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return err } - // populate the message index if user has EnableMsgIndex enabled + // populate the chain Index from the snapshot // - c, err := lr.Config() + basePath, err := lr.SqlitePath() if err != nil { return err } - cfg, ok := c.(*config.FullNode) - if !ok { - return xerrors.Errorf("invalid config for repo, got: %T", c) - } - if cfg.Index.EnableMsgIndex { - log.Info("populating message index...") - basePath, err := lr.SqlitePath() - if err != nil { - return err - } - if err := index.PopulateAfterSnapshot(ctx, filepath.Join(basePath, index.DefaultDbFilename), cst); err != nil { - return err - } - log.Info("populating message index done") + + log.Info("populating chain index...") + if err := chainindex.PopulateFromSnapshot(ctx, filepath.Join(basePath, chainindex.DefaultDbFilename), cst); err != nil { + return err } + log.Info("populating chain index done") return nil } diff --git a/conformance/driver.go b/conformance/driver.go index f0dd5cc2a4c..ae3143e22dd 100644 --- 
a/conformance/driver.go +++ b/conformance/driver.go @@ -30,6 +30,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/conformance/chaos" _ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures _ "github.com/filecoin-project/lotus/lib/sigs/delegated" @@ -110,7 +111,8 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params cs = store.NewChainStore(bs, bs, ds, filcns.Weight, nil) tse = consensus.NewTipSetExecutor(filcns.RewardFunc) - sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil, ds, index.DummyMsgIndex) + sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil, ds, + index.DummyMsgIndex, chainindex.DummyIndexer) ) if err != nil { return nil, err diff --git a/itests/eth_hash_lookup_test.go b/itests/eth_hash_lookup_test.go index 9324aafbb63..087673281fe 100644 --- a/itests/eth_hash_lookup_test.go +++ b/itests/eth_hash_lookup_test.go @@ -83,8 +83,15 @@ func TestTransactionHashLookup(t *testing.T) { hash := client.EVM().SubmitTransaction(ctx, &tx) require.Equal(t, rawTxHash, hash) - mpoolTx, err := client.EthGetTransactionByHash(ctx, &hash) - require.NoError(t, err) + var mpoolTx *ethtypes.EthTx + for i := 0; i < 3; i++ { + mpoolTx, err = client.EthGetTransactionByHash(ctx, &hash) + require.NoError(t, err) + if mpoolTx != nil { + break + } + time.Sleep(100 * time.Millisecond) + } require.Equal(t, hash, mpoolTx.Hash) // Wait for message to land on chain diff --git a/itests/msgindex_test.go b/itests/msgindex_test.go index d9ed752797e..d216b769328 100644 --- a/itests/msgindex_test.go +++ b/itests/msgindex_test.go @@ -52,7 +52,7 @@ func testMsgIndex( makeMsgIndex := func(cs *store.ChainStore) (index.MsgIndex, error) { var err error tmp := t.TempDir() - msgIndex, err := index.NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) + msgIndex, err := index.NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs, true) if err == nil { mx.Lock() tmpDirs = append(tmpDirs, tmp) diff --git a/lib/sqlite/sqlite.go b/lib/sqlite/sqlite.go index cb489284c9a..a0982aff392 100644 --- a/lib/sqlite/sqlite.go +++ b/lib/sqlite/sqlite.go @@ -28,6 +28,7 @@ var pragmas = []string{ "PRAGMA journal_mode = WAL", "PRAGMA wal_autocheckpoint = 256", // checkpoint @ 256 pages "PRAGMA journal_size_limit = 0", // always reset journal and wal files + "PRAGMA foreign_keys = ON", } const metaTableDdl = `CREATE TABLE IF NOT EXISTS _meta ( diff --git a/node/builder.go b/node/builder.go index 94fe170cc21..86034322e2d 100644 --- a/node/builder.go +++ b/node/builder.go @@ -129,6 +129,8 @@ const ( StoreEventsKey + InitChainIndexerKey + _nInvokes // keep this last ) diff --git a/node/builder_chain.go b/node/builder_chain.go index ffdcf3a64a2..72168a0c2d1 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -35,6 +35,7 @@ import ( "github.com/filecoin-project/lotus/chain/wallet" ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" "github.com/filecoin-project/lotus/chain/wallet/remotewallet" + "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/lib/peermgr" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/hello" @@ -285,6 +286,9 @@ func ConfigFullNode(c interface{}) Option { If(cfg.FaultReporter.EnableConsensusFaultReporter, 
Override(ConsensusReporterKey, modules.RunConsensusFaultReporter(cfg.FaultReporter)), ), + + Override(new(chainindex.Indexer), modules.ChainIndexer(cfg.Index)), + Override(InitChainIndexerKey, modules.InitChainIndexer), ) } diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 6420c0f5f14..5b6b1497a8a 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -350,6 +350,14 @@ in a cluster. Only 1 is required`, Comment: `EXPERIMENTAL FEATURE. USE WITH CAUTION EnableMsgIndex enables indexing of messages on chain.`, }, + { + Name: "GCRetentionEpochs", + Type: "int64", + + Comment: `GCRetentionEpochs defines the number of epochs for which data is retained in the Indexer. +During the garbage collection (GC) process, data older than this retention period is pruned. +A value of 0 disables GC, retaining all historical data.`, + }, }, "JournalConfig": { { diff --git a/node/config/types.go b/node/config/types.go index d7753d4e19e..e5391113896 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -632,6 +632,11 @@ type IndexConfig struct { // EXPERIMENTAL FEATURE. USE WITH CAUTION // EnableMsgIndex enables indexing of messages on chain. EnableMsgIndex bool + + // GCRetentionEpochs defines the number of epochs for which data is retained in the Indexer. + // During the garbage collection (GC) process, data older than this retention period is pruned. + // A value of 0 disables GC, retaining all historical data. + GCRetentionEpochs int64 } type HarmonyDB struct { diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index 3f00b62bfb3..f36b4e5101d 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -39,6 +39,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" + "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/node/modules/dtypes" ) @@ -143,6 +144,8 @@ type EthModule struct { EthBlkCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] // caches blocks by their CID but blocks only have the transaction hashes EthBlkTxCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] // caches blocks along with full transaction payload by their CID + ChainIndexer chainindex.Indexer + ChainAPI MpoolAPI StateAPI @@ -356,9 +359,15 @@ func (a *EthModule) EthGetTransactionByHashLimited(ctx context.Context, txHash * return nil, nil } - c, err := a.EthTxHashManager.TransactionHashLookup.GetCidFromHash(*txHash) + var c cid.Cid + var err error + c, err = a.ChainIndexer.GetCidFromHash(ctx, *txHash) if err != nil { - log.Debug("could not find transaction hash %s in lookup table", txHash.String()) + log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) + c, err = a.EthTxHashManager.TransactionHashLookup.GetCidFromHash(*txHash) + if err != nil { + log.Debug("could not find transaction hash %s in lookup table", txHash.String()) + } } // This isn't an eth transaction we have the mapping for, so let's look it up as a filecoin message @@ -415,7 +424,14 @@ func (a *EthModule) EthGetMessageCidByTransactionHash(ctx context.Context, txHas return nil, nil } - c, err := a.EthTxHashManager.TransactionHashLookup.GetCidFromHash(*txHash) + var c cid.Cid + var err error + c, err = a.ChainIndexer.GetCidFromHash(ctx, *txHash) + if err != nil { + log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) + c, err = a.EthTxHashManager.TransactionHashLookup.GetCidFromHash(*txHash) + } + // We fall out of the first 
condition and continue if errors.Is(err, ethhashlookup.ErrNotFound) { log.Debug("could not find transaction hash %s in lookup table", txHash.String()) @@ -499,9 +515,15 @@ func (a *EthModule) EthGetTransactionReceipt(ctx context.Context, txHash ethtype } func (a *EthModule) EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*api.EthTxReceipt, error) { - c, err := a.EthTxHashManager.TransactionHashLookup.GetCidFromHash(txHash) + var c cid.Cid + var err error + c, err = a.ChainIndexer.GetCidFromHash(ctx, txHash) if err != nil { - log.Debug("could not find transaction hash %s in lookup table", txHash.String()) + log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) + c, err = a.EthTxHashManager.TransactionHashLookup.GetCidFromHash(txHash) + if err != nil { + log.Debug("could not find transaction hash %s in lookup table", txHash.String()) + } } // This isn't an eth transaction we have the mapping for, so let's look it up as a filecoin message @@ -940,10 +962,12 @@ func (a *EthModule) EthSendRawTransaction(ctx context.Context, rawTx ethtypes.Et // make it immediately available in the transaction hash lookup db, even though it will also // eventually get there via the mpool - if err := a.EthTxHashManager.TransactionHashLookup.UpsertHash(txHash, smsg.Cid()); err != nil { - log.Errorf("error inserting tx mapping to db: %s", err) + if err := a.ChainIndexer.IndexEthTxHash(ctx, txHash, smsg.Cid()); err != nil { + log.Errorf("error indexing tx: %s", err) } + fmt.Println("INDEXING CID", smsg.Cid()) + return ethtypes.EthHashFromTxBytes(rawTx), nil } diff --git a/node/modules/chain.go b/node/modules/chain.go index d6779a6305a..54c07fdd5b4 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -26,6 +26,7 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" @@ -124,7 +125,7 @@ func NetworkName(mctx helpers.MetricsCtx, ctx := helpers.LifecycleCtx(mctx, lc) - sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us, nil, nil, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us, nil, nil, index.DummyMsgIndex, chainindex.DummyIndexer) if err != nil { return "", err } diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go new file mode 100644 index 00000000000..32c16c64e74 --- /dev/null +++ b/node/modules/chainindex.go @@ -0,0 +1,62 @@ +package modules + +import ( + "context" + "path/filepath" + + "go.uber.org/fx" + + "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chainindex" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules/helpers" + "github.com/filecoin-project/lotus/node/repo" +) + +func ChainIndexer(cfg config.IndexConfig) func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (chainindex.Indexer, error) { + return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (chainindex.Indexer, error) { + + sqlitePath, err := r.SqlitePath() + if err != nil { + return nil, err + } + + // TODO Implement config driven 
auto-backfilling + chainIndexer, err := chainindex.NewSqliteIndexer(filepath.Join(sqlitePath, chainindex.DefaultDbFilename), cs, cfg.GCRetentionEpochs) + if err != nil { + return nil, err + } + + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return chainIndexer.Close() + }, + }) + + return chainIndexer, nil + } +} + +func InitChainIndexer(lc fx.Lifecycle, indexer chainindex.Indexer, evapi EventHelperAPI, mp *messagepool.MessagePool) { + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + ev, err := events.NewEvents(ctx, &evapi) + if err != nil { + return err + } + + // Tipset listener + _ = ev.Observe(indexer) + + ch, err := mp.Updates(ctx) + if err != nil { + return err + } + go chainindex.WaitForMpoolUpdates(ctx, ch, indexer) + + return nil + }, + }) +} diff --git a/node/modules/ethmodule.go b/node/modules/ethmodule.go index ff087036545..cc092fb06c8 100644 --- a/node/modules/ethmodule.go +++ b/node/modules/ethmodule.go @@ -2,7 +2,6 @@ package modules import ( "context" - "os" "path/filepath" "time" @@ -14,19 +13,22 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/ethhashlookup" - "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types/ethtypes" + "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/filecoin-project/lotus/node/repo" ) -func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI, full.MpoolAPI, full.SyncAPI, *full.EthEventHandler) (*full.EthModule, error) { - return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI, mpoolapi full.MpoolAPI, syncapi full.SyncAPI, ethEventHandler *full.EthEventHandler) (*full.EthModule, error) { +func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, + EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI, full.MpoolAPI, full.SyncAPI, *full.EthEventHandler, chainindex.Indexer) (*full.EthModule, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, + mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI, mpoolapi full.MpoolAPI, syncapi full.SyncAPI, + ethEventHandler *full.EthEventHandler, chainIndexer chainindex.Indexer) (*full.EthModule, error) { ctx := helpers.LifecycleCtx(mctx, lc) sqlitePath, err := r.SqlitePath() @@ -36,10 +38,6 @@ func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRep dbPath := filepath.Join(sqlitePath, ethhashlookup.DefaultDbFilename) - // Check if the db exists, if not, we'll back-fill some entries - _, err = os.Stat(dbPath) - dbAlreadyExists := err == nil - transactionHashLookup, err := ethhashlookup.NewTransactionHashLookup(ctx, dbPath) if err != nil { return nil, err @@ -56,13 +54,6 @@ func EthModuleAPI(cfg config.FevmConfig) 
func(helpers.MetricsCtx, repo.LockedRep TransactionHashLookup: transactionHashLookup, } - if !dbAlreadyExists { - err = ethTxHashManager.PopulateExistingMappings(mctx, 0) - if err != nil { - return nil, err - } - } - // prefill the whole skiplist cache maintained internally by the GetTipsetByHeight go func() { start := time.Now() @@ -76,19 +67,7 @@ func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRep lc.Append(fx.Hook{ OnStart: func(context.Context) error { - ev, err := events.NewEvents(ctx, &evapi) - if err != nil { - return err - } - - // Tipset listener - _ = ev.Observe(ðTxHashManager) - - ch, err := mp.Updates(ctx) - if err != nil { - return err - } - go full.WaitForMpoolUpdates(ctx, ch, ðTxHashManager) + go full.EthTxHashGC(ctx, cfg.EthTxHashMappingLifetimeDays, ðTxHashManager) return nil @@ -125,6 +104,8 @@ func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRep EthBlkCache: blkCache, EthBlkTxCache: blkTxCache, + + ChainIndexer: chainIndexer, }, nil } } diff --git a/node/modules/msgindex.go b/node/modules/msgindex.go index 423be65d1b7..bdf25d3789f 100644 --- a/node/modules/msgindex.go +++ b/node/modules/msgindex.go @@ -18,7 +18,7 @@ func MsgIndex(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r return nil, err } - msgIndex, err := index.NewMsgIndex(helpers.LifecycleCtx(mctx, lc), filepath.Join(basePath, index.DefaultDbFilename), cs) + msgIndex, err := index.NewMsgIndex(helpers.LifecycleCtx(mctx, lc), filepath.Join(basePath, index.DefaultDbFilename), cs, false) if err != nil { return nil, err } diff --git a/node/modules/stmgr.go b/node/modules/stmgr.go index f3eaee219c5..4bc83dd9255 100644 --- a/node/modules/stmgr.go +++ b/node/modules/stmgr.go @@ -8,11 +8,12 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/node/modules/dtypes" ) -func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule, metadataDs dtypes.MetadataDS, msgIndex index.MsgIndex) (*stmgr.StateManager, error) { - sm, err := stmgr.NewStateManager(cs, exec, sys, us, b, metadataDs, msgIndex) +func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule, metadataDs dtypes.MetadataDS, msgIndex index.MsgIndex, chainIndexer chainindex.Indexer) (*stmgr.StateManager, error) { + sm, err := stmgr.NewStateManager(cs, exec, sys, us, b, metadataDs, msgIndex, chainIndexer) if err != nil { return nil, err } From 58569d690d158f38a0fc956f6f1011217afe77ec Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 29 Aug 2024 18:37:22 +0400 Subject: [PATCH 02/66] dont need observer changes for now --- chain/events/events.go | 15 --------------- chain/events/observer.go | 15 --------------- 2 files changed, 30 deletions(-) diff --git a/chain/events/events.go b/chain/events/events.go index 0e8b95ff7a7..1b6cb81b08b 100644 --- a/chain/events/events.go +++ b/chain/events/events.go @@ -65,18 +65,3 @@ func NewEvents(ctx context.Context, api EventHelperAPI) (*Events, error) { gcConfidence := 2 * policy.ChainFinality return newEventsWithGCConfidence(ctx, api, gcConfidence) } - -func NewEventsWithHead(ctx context.Context, api EventHelperAPI, head *types.TipSet) (*Events, error) { - gcConfidence := 2 * policy.ChainFinality - cache := 
newCache(api, gcConfidence) - - ob := newObserverWithHead(cache, gcConfidence, head) - if err := ob.start(ctx); err != nil { - return nil, err - } - - he := newHeightEvents(cache, ob, gcConfidence) - headChange := newHCEvents(cache, ob) - - return &Events{ob, he, headChange}, nil -} diff --git a/chain/events/observer.go b/chain/events/observer.go index 15773898d00..0b021f9965b 100644 --- a/chain/events/observer.go +++ b/chain/events/observer.go @@ -41,21 +41,6 @@ func newObserver(api *cache, gcConfidence abi.ChainEpoch) *observer { return obs } -func newObserverWithHead(api *cache, gcConfidence abi.ChainEpoch, head *types.TipSet) *observer { - obs := &observer{ - api: api, - gcConfidence: gcConfidence, - head: head, - ready: make(chan struct{}), - observers: []TipSetObserver{}, - } - obs.Observe(api.observer()) - - close(obs.ready) // Close the ready channel since we already have a head - - return obs -} - func (o *observer) start(ctx context.Context) error { go o.listenHeadChanges(ctx) From 5a3f76fb4b9f31fa0f57ba2074df8f4c7bbce4db Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 29 Aug 2024 19:23:57 +0400 Subject: [PATCH 03/66] changes --- chainindex/indexer.go | 34 ++++++++++++++++++++++ chainindex/interface.go | 1 + chainindex/read.go | 21 +++++++++++-- documentation/en/default-lotus-config.toml | 8 +++++ itests/eth_transactions_test.go | 6 ---- 5 files changed, 61 insertions(+), 9 deletions(-) diff --git a/chainindex/indexer.go b/chainindex/indexer.go index 8f8666a229f..e9432879be1 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -42,6 +42,9 @@ type SqliteIndexer struct { mu sync.Mutex updateSubs map[uint64]*updateSub subIdCounter uint64 + + closeLk sync.RWMutex + closed bool } func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64) (si *SqliteIndexer, err error) { @@ -84,6 +87,13 @@ func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64) (si * } func (si *SqliteIndexer) Close() error { + si.closeLk.Lock() + defer si.closeLk.Unlock() + if si.closed { + return nil + } + si.closed = true + if si.db == nil { return nil } @@ -148,12 +158,24 @@ func (si *SqliteIndexer) prepareStatements() error { } func (si *SqliteIndexer) IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, msgCid cid.Cid) error { + si.closeLk.RLock() + if si.closed { + return ErrClosed + } + si.closeLk.RUnlock() + return withTx(ctx, si.db, func(tx *sql.Tx) error { return si.indexEthTxHash(ctx, tx, txHash, msgCid) }) } func (si *SqliteIndexer) IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error { + si.closeLk.RLock() + if si.closed { + return ErrClosed + } + si.closeLk.RUnlock() + return withTx(ctx, si.db, func(tx *sql.Tx) error { return si.indexSignedMessage(ctx, tx, msg) }) @@ -188,6 +210,12 @@ func (si *SqliteIndexer) indexEthTxHash(ctx context.Context, tx *sql.Tx, txHash } func (si *SqliteIndexer) Apply(ctx context.Context, from, to *types.TipSet) error { + si.closeLk.RLock() + if si.closed { + return ErrClosed + } + si.closeLk.RUnlock() + // We're moving the chain ahead from the `from` tipset to the `to` tipset // Height(to) > Height(from) err := withTx(ctx, si.db, func(tx *sql.Tx) error { @@ -214,6 +242,12 @@ func (si *SqliteIndexer) Apply(ctx context.Context, from, to *types.TipSet) erro } func (si *SqliteIndexer) Revert(ctx context.Context, from, to *types.TipSet) error { + si.closeLk.RLock() + if si.closed { + return ErrClosed + } + si.closeLk.RUnlock() + // We're reverting the chain from the tipset at `from` to the tipset 
at `to`. // Height(to) < Height(from) diff --git a/chainindex/interface.go b/chainindex/interface.go index 5534f7a5526..b337eeca61a 100644 --- a/chainindex/interface.go +++ b/chainindex/interface.go @@ -14,6 +14,7 @@ import ( ) var ErrNotFound = errors.New("message not found") +var ErrClosed = errors.New("index closed") // MsgInfo is the Message metadata the index tracks. type MsgInfo struct { diff --git a/chainindex/read.go b/chainindex/read.go index c5ec093ee61..7b35ed18a2d 100644 --- a/chainindex/read.go +++ b/chainindex/read.go @@ -20,6 +20,12 @@ var ( ) func (si *SqliteIndexer) GetMaxNonRevertedTipset(ctx context.Context) (*types.TipSet, error) { + si.closeLk.RLock() + if si.closed { + return nil, ErrClosed + } + si.closeLk.RUnlock() + var tipsetKeyCidBytes []byte err := si.getMaxNonRevertedTipsetStmt.QueryRowContext(ctx).Scan(&tipsetKeyCidBytes) if err != nil { @@ -39,6 +45,12 @@ func (si *SqliteIndexer) GetMaxNonRevertedTipset(ctx context.Context) (*types.Ti } func (si *SqliteIndexer) GetCidFromHash(ctx context.Context, txHash ethtypes.EthHash) (cid.Cid, error) { + si.closeLk.RLock() + if si.closed { + return cid.Undef, ErrClosed + } + si.closeLk.RUnlock() + var msgCidBytes []byte err := si.queryMsgCidFromEthHash(ctx, txHash, &msgCidBytes) @@ -70,11 +82,15 @@ func (si *SqliteIndexer) queryMsgCidFromEthHash(ctx context.Context, txHash etht } func (si *SqliteIndexer) GetMsgInfo(ctx context.Context, messageCid cid.Cid) (*MsgInfo, error) { + si.closeLk.RLock() + if si.closed { + return nil, ErrClosed + } + si.closeLk.RUnlock() + var tipsetKeyCidBytes []byte var height int64 - fmt.Println("GETTING MSG INFO") - err := si.queryMsgInfo(ctx, messageCid, &tipsetKeyCidBytes, &height) if err == sql.ErrNoRows { err = si.waitTillHeadIndexedAndApply(ctx, func() error { @@ -84,7 +100,6 @@ func (si *SqliteIndexer) GetMsgInfo(ctx context.Context, messageCid cid.Cid) (*M if err != nil { if err == sql.ErrNoRows { - fmt.Println("GETTING MSG INFO") return nil, ErrNotFound } return nil, xerrors.Errorf("failed to get message info: %w", err) diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 7f39f23a5b8..4880f6de42b 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -325,6 +325,14 @@ # env var: LOTUS_INDEX_ENABLEMSGINDEX #EnableMsgIndex = false + # GCRetentionEpochs defines the number of epochs for which data is retained in the Indexer. + # During the garbage collection (GC) process, data older than this retention period is pruned. + # A value of 0 disables GC, retaining all historical data. 
+ # + # type: int64 + # env var: LOTUS_INDEX_GCRETENTIONEPOCHS + #GCRetentionEpochs = 0 + [FaultReporter] # EnableConsensusFaultReporter controls whether the node will monitor and diff --git a/itests/eth_transactions_test.go b/itests/eth_transactions_test.go index 224194c6d8d..36c4868ed73 100644 --- a/itests/eth_transactions_test.go +++ b/itests/eth_transactions_test.go @@ -628,12 +628,6 @@ func TestTraceTransaction(t *testing.T) { require.Contains(t, err.Error(), "transaction not found") require.Nil(t, traces) - // EthTraceTransaction errors when a trace for pending transactions is requested - traces, err = client.EthTraceTransaction(ctx, hash.String()) - require.Error(t, err) - require.Contains(t, err.Error(), "no trace for pending transactions") - require.Nil(t, traces) - receipt, err := client.EVM().WaitTransaction(ctx, hash) require.NoError(t, err) require.NotNil(t, receipt) From 9ea48f387764c0b1dd63a2e19b8a64a955b4ea5a Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 29 Aug 2024 20:34:06 +0400 Subject: [PATCH 04/66] fix tests --- chainindex/indexer.go | 19 ++++++++++++++----- chainindex/interface.go | 1 + itests/eth_deploy_test.go | 31 +++++++++++++------------------ 3 files changed, 28 insertions(+), 23 deletions(-) diff --git a/chainindex/indexer.go b/chainindex/indexer.go index e9432879be1..d4425529c1e 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -304,14 +304,23 @@ func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types. if _, err := insertTipsetMsgStmt.ExecContext(ctx, tsKeyCidBytes, height, 0, msg.Cid().Bytes(), i); err != nil { return xerrors.Errorf("error inserting tipset message: %w", err) } + } - smsg, ok := msg.(*types.SignedMessage) - if !ok { - continue + for _, blk := range ts.Blocks() { + blk := blk + _, smsgs, err := si.cs.MessagesForBlock(ctx, blk) + if err != nil { + return err } - if err := si.indexSignedMessage(ctx, tx, smsg); err != nil { - return xerrors.Errorf("error indexing eth tx hash: %w", err) + for _, smsg := range smsgs { + smsg := smsg + if smsg.Signature.Type != crypto.SigTypeDelegated { + continue + } + if err := si.indexSignedMessage(ctx, tx, smsg); err != nil { + return xerrors.Errorf("error indexing eth tx hash: %w", err) + } } } diff --git a/chainindex/interface.go b/chainindex/interface.go index b337eeca61a..a655bbe9874 100644 --- a/chainindex/interface.go +++ b/chainindex/interface.go @@ -45,6 +45,7 @@ type ChainStore interface { GetHeaviestTipSet() *types.TipSet GetTipSetByCid(ctx context.Context, tsKeyCid cid.Cid) (*types.TipSet, error) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) + MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) } var _ ChainStore = (*store.ChainStore)(nil) diff --git a/itests/eth_deploy_test.go b/itests/eth_deploy_test.go index 8fb9b1515ed..07df970e376 100644 --- a/itests/eth_deploy_test.go +++ b/itests/eth_deploy_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/hex" "encoding/json" + "fmt" "os" "reflect" "strconv" @@ -35,7 +36,7 @@ func TestDeployment(t *testing.T) { kit.MockProofs(), kit.ThroughRPC()) - miners := ens.InterconnectAll().BeginMining(blockTime) + _ = ens.InterconnectAll().BeginMining(blockTime) ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() @@ -91,25 +92,22 @@ func TestDeployment(t *testing.T) { pendingFilter, err := client.EthNewPendingTransactionFilter(ctx) require.NoError(t, err) - // Pause so we can test that 
everything works while the message is in the message pool. - for _, miner := range miners { - miner.Pause() - } - hash := client.EVM().SubmitTransaction(ctx, &tx) - mpoolTx, err := client.EthGetTransactionByHash(ctx, &hash) - require.NoError(t, err) + var mpoolTx *ethtypes.EthTx + for i := 0; i < 3; i++ { + mpoolTx, err = client.EthGetTransactionByHash(ctx, &hash) + require.NoError(t, err) + if mpoolTx != nil { + break + } + time.Sleep(500 * time.Millisecond) + } require.NotNil(t, mpoolTx) // require that the hashes are identical require.Equal(t, hash, mpoolTx.Hash) - // these fields should be nil because the tx hasn't landed on chain. - require.Nil(t, mpoolTx.BlockNumber) - require.Nil(t, mpoolTx.BlockHash) - require.Nil(t, mpoolTx.TransactionIndex) - // We should be able to get the message CID immediately. mCid, err := client.EthGetMessageCidByTransactionHash(ctx, &hash) require.NoError(t, err) @@ -126,14 +124,11 @@ func TestDeployment(t *testing.T) { require.Len(t, changes.Results, 1) require.Equal(t, hash.String(), changes.Results[0]) - // Unpause mining. - for _, miner := range miners { - miner.Restart() - } - // Wait for the message to land. + fmt.Println("waiting for message to land") _, err = client.StateWaitMsg(ctx, *mCid, 3, api.LookbackNoLimit, false) require.NoError(t, err) + fmt.Println("message landed") // Then lookup the receipt. receipt, err := client.EthGetTransactionReceipt(ctx, hash) From 1e3a9d5f92353793129d6650b87676c9be59dfb6 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 29 Aug 2024 20:52:30 +0400 Subject: [PATCH 05/66] fix tests --- chainindex/indexer.go | 1 - chainindex/read.go | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/chainindex/indexer.go b/chainindex/indexer.go index d4425529c1e..573cd6dbde4 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -57,7 +57,6 @@ func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64) (si * defer func() { if err != nil { - cancel() _ = db.Close() } }() diff --git a/chainindex/read.go b/chainindex/read.go index 7b35ed18a2d..5f5f4aa5a45 100644 --- a/chainindex/read.go +++ b/chainindex/read.go @@ -54,11 +54,11 @@ func (si *SqliteIndexer) GetCidFromHash(ctx context.Context, txHash ethtypes.Eth var msgCidBytes []byte err := si.queryMsgCidFromEthHash(ctx, txHash, &msgCidBytes) - if err == sql.ErrNoRows { + /*if err == sql.ErrNoRows { err = si.waitTillHeadIndexedAndApply(ctx, func() error { return si.queryMsgCidFromEthHash(ctx, txHash, &msgCidBytes) }) - } + }*/ if err != nil { if err == sql.ErrNoRows { @@ -92,11 +92,11 @@ func (si *SqliteIndexer) GetMsgInfo(ctx context.Context, messageCid cid.Cid) (*M var height int64 err := si.queryMsgInfo(ctx, messageCid, &tipsetKeyCidBytes, &height) - if err == sql.ErrNoRows { + /*if err == sql.ErrNoRows { err = si.waitTillHeadIndexedAndApply(ctx, func() error { return si.queryMsgInfo(ctx, messageCid, &tipsetKeyCidBytes, &height) }) - } + }*/ if err != nil { if err == sql.ErrNoRows { From 4c34bc734492832d1b104a70f86b15f40e62c2ef Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 29 Aug 2024 22:39:28 +0400 Subject: [PATCH 06/66] use th right context --- chainindex/indexer.go | 1 + chainindex/read.go | 8 ++++---- node/modules/chainindex.go | 7 ++++--- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/chainindex/indexer.go b/chainindex/indexer.go index 573cd6dbde4..cd50eba15df 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -58,6 +58,7 @@ func NewSqliteIndexer(path string, cs ChainStore, 
gcRetentionEpochs int64) (si * defer func() { if err != nil { _ = db.Close() + cancel() } }() diff --git a/chainindex/read.go b/chainindex/read.go index 5f5f4aa5a45..7b35ed18a2d 100644 --- a/chainindex/read.go +++ b/chainindex/read.go @@ -54,11 +54,11 @@ func (si *SqliteIndexer) GetCidFromHash(ctx context.Context, txHash ethtypes.Eth var msgCidBytes []byte err := si.queryMsgCidFromEthHash(ctx, txHash, &msgCidBytes) - /*if err == sql.ErrNoRows { + if err == sql.ErrNoRows { err = si.waitTillHeadIndexedAndApply(ctx, func() error { return si.queryMsgCidFromEthHash(ctx, txHash, &msgCidBytes) }) - }*/ + } if err != nil { if err == sql.ErrNoRows { @@ -92,11 +92,11 @@ func (si *SqliteIndexer) GetMsgInfo(ctx context.Context, messageCid cid.Cid) (*M var height int64 err := si.queryMsgInfo(ctx, messageCid, &tipsetKeyCidBytes, &height) - /*if err == sql.ErrNoRows { + if err == sql.ErrNoRows { err = si.waitTillHeadIndexedAndApply(ctx, func() error { return si.queryMsgInfo(ctx, messageCid, &tipsetKeyCidBytes, &height) }) - }*/ + } if err != nil { if err == sql.ErrNoRows { diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index 32c16c64e74..27fac5405b2 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -17,7 +17,6 @@ import ( func ChainIndexer(cfg config.IndexConfig) func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (chainindex.Indexer, error) { return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (chainindex.Indexer, error) { - sqlitePath, err := r.SqlitePath() if err != nil { return nil, err @@ -39,9 +38,11 @@ func ChainIndexer(cfg config.IndexConfig) func(lc fx.Lifecycle, mctx helpers.Met } } -func InitChainIndexer(lc fx.Lifecycle, indexer chainindex.Indexer, evapi EventHelperAPI, mp *messagepool.MessagePool) { +func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer chainindex.Indexer, evapi EventHelperAPI, mp *messagepool.MessagePool) { + ctx := helpers.LifecycleCtx(mctx, lc) + lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { + OnStart: func(_ context.Context) error { ev, err := events.NewEvents(ctx, &evapi) if err != nil { return err From 285ce263ae733dad7b36a13c1b8b193e5c181e95 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 30 Aug 2024 13:32:12 +0400 Subject: [PATCH 07/66] index empty tipsets correctly --- chainindex/ddls.go | 4 ++-- chainindex/indexer.go | 8 ++++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/chainindex/ddls.go b/chainindex/ddls.go index 798eb02fe7e..62ec0cd1916 100644 --- a/chainindex/ddls.go +++ b/chainindex/ddls.go @@ -28,8 +28,8 @@ var ddls = []string{ tipset_key_cid BLOB NOT NULL, height INTEGER NOT NULL, reverted INTEGER NOT NULL, - message_cid BLOB NOT NULL, - message_index INTEGER NOT NULL, + message_cid BLOB, + message_index INTEGER, UNIQUE (tipset_key_cid, message_cid) )`, diff --git a/chainindex/indexer.go b/chainindex/indexer.go index cd50eba15df..3ddbd539b2e 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -299,6 +299,14 @@ func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types. 
return xerrors.Errorf("error getting messages for tipset: %w", err) } + if len(msgs) == 0 { + // If there are no messages, just insert the tipset and return + if _, err := insertTipsetMsgStmt.ExecContext(ctx, tsKeyCidBytes, height, 0, nil, -1); err != nil { + return xerrors.Errorf("error inserting empty tipset: %w", err) + } + return nil + } + for i, msg := range msgs { msg := msg if _, err := insertTipsetMsgStmt.ExecContext(ctx, tsKeyCidBytes, height, 0, msg.Cid().Bytes(), i); err != nil { From 12e67fe4009f4d0c2d3043ce265732874a258150 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 30 Aug 2024 19:01:47 +0400 Subject: [PATCH 08/66] implement automated backfilling --- chain/events/observer.go | 18 ++++++ chainindex/ddls.go | 8 +++ chainindex/indexer.go | 124 +++++++++++++++++++++++++++++++++++++ chainindex/interface.go | 5 ++ node/modules/chainindex.go | 7 ++- 5 files changed, 161 insertions(+), 1 deletion(-) diff --git a/chain/events/observer.go b/chain/events/observer.go index 0b021f9965b..541875c68f9 100644 --- a/chain/events/observer.go +++ b/chain/events/observer.go @@ -244,6 +244,24 @@ func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) err return nil } +// ObserveAndBlock registers the observer and returns the current tipset along with a handle function. +// The observer is guaranteed to observe events starting at this tipset. +// The returned handle function should be called by the client when it's ready to receive updates. +// +// This function should only be called by the client after the observer has been started. +// Note that the Observer will block all clients from recieving tipset updates until the handle is called. +func (o *observer) ObserveAndBlock(obs TipSetObserver) (*types.TipSet, func()) { + o.lk.Lock() + o.observers = append(o.observers, obs) + currentHead := o.head + + unlockHandle := func() { + o.lk.Unlock() + } + + return currentHead, unlockHandle +} + // Observe registers the observer, and returns the current tipset. The observer is guaranteed to // observe events starting at this tipset. // diff --git a/chainindex/ddls.go b/chainindex/ddls.go index 62ec0cd1916..1ddb48bd69e 100644 --- a/chainindex/ddls.go +++ b/chainindex/ddls.go @@ -20,6 +20,14 @@ const ( stmtRemoveTipsetsBeforeHeight = "DELETE FROM tipset_message WHERE height < ?" stmtDeleteEthHashesOlderThan = `DELETE FROM eth_tx_hash WHERE inserted_at < datetime('now', ?);` + + stmtRevertTipsetsFromHeight = "UPDATE tipset_message SET reverted = 1 WHERE height >= ?" + + stmtCountMessages = "SELECT COUNT(*) FROM tipset_message" + + stmtMinNonRevertedHeight = `SELECT MIN(height) FROM tipset_message WHERE reverted = 0` + + stmtTipsetExistsNotReverted = `SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ? 
AND reverted = 0)` ) var ddls = []string{ diff --git a/chainindex/indexer.go b/chainindex/indexer.go index 3ddbd539b2e..b7f946b257b 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -8,6 +8,7 @@ import ( "github.com/ipfs/go-cid" "golang.org/x/xerrors" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/chain/types" @@ -36,6 +37,10 @@ type SqliteIndexer struct { removeRevertedTipsetsBeforeHeightStmt *sql.Stmt removeTipsetsBeforeHeightStmt *sql.Stmt deleteEthHashesOlderThanStmt *sql.Stmt + revertTipsetsFromHeightStmt *sql.Stmt + countMessagesStmt *sql.Stmt + minNonRevertedHeightStmt *sql.Stmt + tipsetExistsNotRevertedStmt *sql.Stmt gcRetentionEpochs int64 @@ -86,6 +91,104 @@ func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64) (si * return si, nil } +// ReconcileWithChain ensures that the index is consistent with the current chain state. +// It performs the following steps: +// 1. Checks if the index is empty. If so, it returns immediately as there's nothing to reconcile. +// 2. Finds the lowest non-reverted height in the index. +// 3. Walks backwards from the current chain head until it finds a tipset that exists +// in the index and is not marked as reverted. +// 4. Sets a boundary epoch just above this found tipset. +// 5. Marks all tipsets above this boundary as reverted, ensuring consistency with the current chain state. +// 6. Applies all missing un-indexed tipsets starting from the last matching tipset b/w index and canonical chain +// to the current chain head. +// +// This function is crucial for maintaining index integrity, especially after chain reorgs. +// It ensures that the index accurately reflects the current state of the blockchain. +func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types.TipSet) error { + si.closeLk.RLock() + if si.closed { + return ErrClosed + } + si.closeLk.RUnlock() + + if currHead == nil { + return nil + } + + return withTx(ctx, si.db, func(tx *sql.Tx) error { + row := tx.StmtContext(ctx, si.countMessagesStmt).QueryRowContext(ctx) + var result int64 + if err := row.Scan(&result); err != nil { + return xerrors.Errorf("error counting messages: %w", err) + } + if result == 0 { + return nil + } + + // Find the minimum applied tipset in the index; this will mark the end of the reconciliation walk + row = tx.StmtContext(ctx, si.minNonRevertedHeightStmt).QueryRowContext(ctx) + if err := row.Scan(&result); err != nil { + return xerrors.Errorf("error finding boundary epoch: %w", err) + } + + boundaryEpoch := abi.ChainEpoch(result) + + var tipsetStack []*types.TipSet + + curTs := currHead + log.Infof("Starting chain reconciliation from height %d", currHead.Height()) + for curTs != nil && curTs.Height() >= boundaryEpoch { + tsKeyCidBytes, err := toTipsetKeyCidBytes(curTs) + if err != nil { + return xerrors.Errorf("error computing tipset cid: %w", err) + } + + var exists bool + err = tx.StmtContext(ctx, si.tipsetExistsNotRevertedStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists) + if err != nil { + return xerrors.Errorf("error checking if tipset exists and is not reverted: %w", err) + } + + if exists { + // found it! 
+ boundaryEpoch = curTs.Height() + 1 + log.Infof("Found matching tipset at height %d, setting boundary epoch to %d", curTs.Height(), boundaryEpoch) + break + } + tipsetStack = append(tipsetStack, curTs) + + // walk up + parents := curTs.Parents() + curTs, err = si.cs.GetTipSetFromKey(ctx, parents) + if err != nil { + return xerrors.Errorf("error walking chain: %w", err) + } + } + + if curTs == nil { + log.Warn("ReconcileWithChain reached genesis without finding matching tipset") + } + + // mark all tipsets from the boundary epoch in the Index as reverted as they are not in the current canonical chain + log.Infof("Marking tipsets as reverted from height %d", boundaryEpoch) + _, err := tx.StmtContext(ctx, si.revertTipsetsFromHeightStmt).ExecContext(ctx, int64(boundaryEpoch)) + if err != nil { + return xerrors.Errorf("error marking tipsets as reverted: %w", err) + } + + // Now apply all missing tipsets in reverse order i,e, we apply tipsets in [last matching tipset b/w index and canonical chain, + // current chain head] + for i := len(tipsetStack) - 1; i >= 0; i-- { + curTs := tipsetStack[i] + if err := si.indexTipset(ctx, tx, curTs); err != nil { + return xerrors.Errorf("error indexing tipset: %w", err) + } + } + + return nil + }) +} + func (si *SqliteIndexer) Close() error { si.closeLk.Lock() defer si.closeLk.Unlock() @@ -153,6 +256,27 @@ func (si *SqliteIndexer) prepareStatements() error { if err != nil { return xerrors.Errorf("prepare %s: %w", "deleteEthHashesOlderThanStmt", err) } + si.revertTipsetsFromHeightStmt, err = si.db.Prepare(stmtRevertTipsetsFromHeight) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "revertTipsetsFromHeightStmt", err) + } + si.countMessagesStmt, err = si.db.Prepare(stmtCountMessages) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "countMessagesStmt", err) + } + si.minNonRevertedHeightStmt, err = si.db.Prepare(stmtMinNonRevertedHeight) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "minNonRevertedHeightStmt", err) + } + si.tipsetExistsNotRevertedStmt, err = si.db.Prepare(stmtTipsetExistsNotReverted) + + if err != nil { + return xerrors.Errorf("prepare %s: %w", "tipsetExistsNotRevertedStmt", err) + } + si.tipsetExistsNotRevertedStmt, err = si.db.Prepare(stmtTipsetExistsNotReverted) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "tipsetExistsNotRevertedStmt", err) + } return nil } diff --git a/chainindex/interface.go b/chainindex/interface.go index a655bbe9874..80b1f1ecc75 100644 --- a/chainindex/interface.go +++ b/chainindex/interface.go @@ -27,6 +27,7 @@ type MsgInfo struct { } type Indexer interface { + ReconcileWithChain(ctx context.Context, currHead *types.TipSet) error IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, c cid.Cid) error @@ -80,4 +81,8 @@ func (dummyIndexer) Revert(ctx context.Context, from, to *types.TipSet) error { return nil } +func (dummyIndexer) ReconcileWithChain(ctx context.Context, currHead *types.TipSet) error { + return nil +} + var DummyIndexer Indexer = dummyIndexer{} diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index 27fac5405b2..d01b9ed3be0 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -5,6 +5,7 @@ import ( "path/filepath" "go.uber.org/fx" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/messagepool" @@ -49,7 +50,11 @@ func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, 
indexer chainind } // Tipset listener - _ = ev.Observe(indexer) + tipset, unlockObserver := ev.ObserveAndBlock(indexer) + if err := indexer.ReconcileWithChain(ctx, tipset); err != nil { + return xerrors.Errorf("error while reconciling chain index with chain state: %w", err) + } + unlockObserver() ch, err := mp.Updates(ctx) if err != nil { From 33779874658e9bfd9349236a92d44d48ee5d8db9 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Wed, 4 Sep 2024 19:01:29 +0400 Subject: [PATCH 09/66] add event indexing and remove all old indices --- .../eth_transaction_hash_lookup.go | 150 --- chain/events/filter/event.go | 60 +- chain/events/filter/event_test.go | 7 +- chain/events/filter/index.go | 672 ----------- chain/events/filter/index_migrations.go | 260 ---- chain/events/filter/index_test.go | 1046 ----------------- chain/events/observer.go | 2 +- chain/gen/gen.go | 4 +- chain/index/interface.go | 35 - chain/index/msgindex.go | 502 -------- chain/index/msgindex_test.go | 307 ----- chain/stmgr/forks_test.go | 17 +- chain/stmgr/searchwait.go | 13 +- chain/stmgr/stmgr.go | 9 +- chain/store/store_test.go | 4 +- chainindex/ddls.go | 37 + chainindex/events.go | 446 +++++++ chainindex/helpers.go | 18 +- chainindex/indexer.go | 101 +- chainindex/interface.go | 64 +- chainindex/read.go | 29 - cmd/lotus-bench/import.go | 4 +- cmd/lotus-shed/balances.go | 6 +- cmd/lotus-shed/gas-estimation.go | 6 +- cmd/lotus-shed/invariants.go | 4 +- cmd/lotus-shed/migrations.go | 4 +- cmd/lotus-shed/state-stats.go | 4 +- cmd/lotus-sim/simulation/node.go | 6 +- cmd/lotus-sim/simulation/simulation.go | 5 +- cmd/lotus/daemon.go | 3 +- conformance/driver.go | 4 +- documentation/en/default-lotus-config.toml | 7 - go.mod | 2 +- go.sum | 4 +- itests/msgindex_test.go | 124 -- lib/sqlite/sqlite.go | 1 + node/builder.go | 2 - node/builder_chain.go | 5 - node/config/def.go | 7 +- node/config/doc_gen.go | 7 - node/config/types.go | 4 - node/impl/full/actor_events.go | 3 +- node/impl/full/actor_events_test.go | 21 +- node/impl/full/eth.go | 155 +-- node/impl/full/eth_events.go | 11 +- node/impl/full/txhashmanager.go | 136 --- node/modules/actorevent.go | 38 +- node/modules/chain.go | 4 +- node/modules/chainindex.go | 23 +- node/modules/ethmodule.go | 38 +- node/modules/msgindex.go | 37 - node/modules/stmgr.go | 5 +- 52 files changed, 767 insertions(+), 3696 deletions(-) delete mode 100644 chain/ethhashlookup/eth_transaction_hash_lookup.go delete mode 100644 chain/events/filter/index.go delete mode 100644 chain/events/filter/index_migrations.go delete mode 100644 chain/events/filter/index_test.go delete mode 100644 chain/index/interface.go delete mode 100644 chain/index/msgindex.go delete mode 100644 chain/index/msgindex_test.go create mode 100644 chainindex/events.go delete mode 100644 itests/msgindex_test.go delete mode 100644 node/impl/full/txhashmanager.go delete mode 100644 node/modules/msgindex.go diff --git a/chain/ethhashlookup/eth_transaction_hash_lookup.go b/chain/ethhashlookup/eth_transaction_hash_lookup.go deleted file mode 100644 index 2a34e37aa03..00000000000 --- a/chain/ethhashlookup/eth_transaction_hash_lookup.go +++ /dev/null @@ -1,150 +0,0 @@ -package ethhashlookup - -import ( - "context" - "database/sql" - "errors" - "strconv" - - "github.com/ipfs/go-cid" - _ "github.com/mattn/go-sqlite3" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/types/ethtypes" - "github.com/filecoin-project/lotus/lib/sqlite" -) - -const DefaultDbFilename = "txhash.db" - -var ErrNotFound = errors.New("not found") - -var 
ddls = []string{ - `CREATE TABLE IF NOT EXISTS eth_tx_hashes ( - hash TEXT PRIMARY KEY NOT NULL, - cid TEXT NOT NULL UNIQUE, - insertion_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL - )`, - - `CREATE INDEX IF NOT EXISTS insertion_time_index ON eth_tx_hashes (insertion_time)`, -} - -const ( - insertTxHash = `INSERT INTO eth_tx_hashes (hash, cid) VALUES(?, ?) ON CONFLICT (hash) DO UPDATE SET insertion_time = CURRENT_TIMESTAMP` - getCidFromHash = `SELECT cid FROM eth_tx_hashes WHERE hash = ?` - getHashFromCid = `SELECT hash FROM eth_tx_hashes WHERE cid = ?` - deleteOlderThan = `DELETE FROM eth_tx_hashes WHERE insertion_time < datetime('now', ?);` -) - -type EthTxHashLookup struct { - db *sql.DB - - stmtInsertTxHash *sql.Stmt - stmtGetCidFromHash *sql.Stmt - stmtGetHashFromCid *sql.Stmt - stmtDeleteOlderThan *sql.Stmt -} - -func NewTransactionHashLookup(ctx context.Context, path string) (*EthTxHashLookup, error) { - db, _, err := sqlite.Open(path) - if err != nil { - return nil, xerrors.Errorf("failed to setup eth transaction hash lookup db: %w", err) - } - - if err := sqlite.InitDb(ctx, "eth transaction hash lookup", db, ddls, []sqlite.MigrationFunc{}); err != nil { - _ = db.Close() - return nil, xerrors.Errorf("failed to init eth transaction hash lookup db: %w", err) - } - - ei := &EthTxHashLookup{db: db} - - if err = ei.initStatements(); err != nil { - _ = ei.Close() - return nil, xerrors.Errorf("error preparing eth transaction hash lookup db statements: %w", err) - } - - return ei, nil -} - -func (ei *EthTxHashLookup) initStatements() (err error) { - ei.stmtInsertTxHash, err = ei.db.Prepare(insertTxHash) - if err != nil { - return xerrors.Errorf("prepare stmtInsertTxHash: %w", err) - } - ei.stmtGetCidFromHash, err = ei.db.Prepare(getCidFromHash) - if err != nil { - return xerrors.Errorf("prepare stmtGetCidFromHash: %w", err) - } - ei.stmtGetHashFromCid, err = ei.db.Prepare(getHashFromCid) - if err != nil { - return xerrors.Errorf("prepare stmtGetHashFromCid: %w", err) - } - ei.stmtDeleteOlderThan, err = ei.db.Prepare(deleteOlderThan) - if err != nil { - return xerrors.Errorf("prepare stmtDeleteOlderThan: %w", err) - } - return nil -} - -func (ei *EthTxHashLookup) UpsertHash(txHash ethtypes.EthHash, c cid.Cid) error { - if ei.db == nil { - return xerrors.New("db closed") - } - - _, err := ei.stmtInsertTxHash.Exec(txHash.String(), c.String()) - return err -} - -func (ei *EthTxHashLookup) GetCidFromHash(txHash ethtypes.EthHash) (cid.Cid, error) { - if ei.db == nil { - return cid.Undef, xerrors.New("db closed") - } - - row := ei.stmtGetCidFromHash.QueryRow(txHash.String()) - var c string - err := row.Scan(&c) - if err != nil { - if err == sql.ErrNoRows { - return cid.Undef, ErrNotFound - } - return cid.Undef, err - } - return cid.Decode(c) -} - -func (ei *EthTxHashLookup) GetHashFromCid(c cid.Cid) (ethtypes.EthHash, error) { - if ei.db == nil { - return ethtypes.EmptyEthHash, xerrors.New("db closed") - } - - row := ei.stmtGetHashFromCid.QueryRow(c.String()) - var hashString string - err := row.Scan(&c) - if err != nil { - if err == sql.ErrNoRows { - return ethtypes.EmptyEthHash, ErrNotFound - } - return ethtypes.EmptyEthHash, err - } - return ethtypes.ParseEthHash(hashString) -} - -func (ei *EthTxHashLookup) DeleteEntriesOlderThan(days int) (int64, error) { - if ei.db == nil { - return 0, xerrors.New("db closed") - } - - res, err := ei.stmtDeleteOlderThan.Exec("-" + strconv.Itoa(days) + " day") - if err != nil { - return 0, err - } - return res.RowsAffected() -} - -func (ei 
*EthTxHashLookup) Close() (err error) { - if ei.db == nil { - return nil - } - db := ei.db - ei.db = nil - return db.Close() -} diff --git a/chain/events/filter/event.go b/chain/events/filter/event.go index fa17d235ea9..f5d7ac8f106 100644 --- a/chain/events/filter/event.go +++ b/chain/events/filter/event.go @@ -18,6 +18,7 @@ import ( cstore "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chainindex" ) func isIndexedValue(b uint8) bool { @@ -32,7 +33,7 @@ type AddressResolver func(context.Context, abi.ActorID, *types.TipSet) (address. type EventFilter interface { Filter - TakeCollectedEvents(context.Context) []*CollectedEvent + TakeCollectedEvents(context.Context) []*chainindex.CollectedEvent CollectEvents(context.Context, *TipSetEvents, bool, AddressResolver) error } @@ -47,24 +48,13 @@ type eventFilter struct { maxResults int // maximum number of results to collect, 0 is unlimited mu sync.Mutex - collected []*CollectedEvent + collected []*chainindex.CollectedEvent lastTaken time.Time ch chan<- interface{} } var _ Filter = (*eventFilter)(nil) -type CollectedEvent struct { - Entries []types.EventEntry - EmitterAddr address.Address // address of emitter - EventIdx int // index of the event within the list of emitted events in a given tipset - Reverted bool - Height abi.ChainEpoch - TipSetKey types.TipSetKey // tipset that contained the message - MsgIdx int // index of the message in the tipset - MsgCid cid.Cid // cid of message that produced event -} - func (f *eventFilter) ID() types.FilterID { return f.id } @@ -119,7 +109,7 @@ func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever } // event matches filter, so record it - cev := &CollectedEvent{ + cev := &chainindex.CollectedEvent{ Entries: ev.Entries, EmitterAddr: addr, EventIdx: eventCount, @@ -151,13 +141,13 @@ func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever return nil } -func (f *eventFilter) setCollectedEvents(ces []*CollectedEvent) { +func (f *eventFilter) setCollectedEvents(ces []*chainindex.CollectedEvent) { f.mu.Lock() f.collected = ces f.mu.Unlock() } -func (f *eventFilter) TakeCollectedEvents(ctx context.Context) []*CollectedEvent { +func (f *eventFilter) TakeCollectedEvents(ctx context.Context) []*chainindex.CollectedEvent { f.mu.Lock() collected := f.collected f.collected = nil @@ -307,7 +297,7 @@ type EventFilterManager struct { ChainStore *cstore.ChainStore AddressResolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) MaxFilterResults int - EventIndex *EventIndex + ChainIndexer chainindex.Indexer mu sync.Mutex // guards mutations to filters filters map[types.FilterID]EventFilter @@ -319,7 +309,7 @@ func (m *EventFilterManager) Apply(ctx context.Context, from, to *types.TipSet) defer m.mu.Unlock() m.currentHeight = to.Height() - if len(m.filters) == 0 && m.EventIndex == nil { + if len(m.filters) == 0 { return nil } @@ -329,12 +319,6 @@ func (m *EventFilterManager) Apply(ctx context.Context, from, to *types.TipSet) load: m.loadExecutedMessages, } - if m.EventIndex != nil { - if err := m.EventIndex.CollectEvents(ctx, tse, false, m.AddressResolver); err != nil { - return err - } - } - // TODO: could run this loop in parallel with errgroup if there are many filters for _, f := range m.filters { if err := f.CollectEvents(ctx, tse, false, m.AddressResolver); err != nil { @@ -350,7 +334,7 @@ func (m *EventFilterManager) Revert(ctx context.Context, 
from, to *types.TipSet) defer m.mu.Unlock() m.currentHeight = to.Height() - if len(m.filters) == 0 && m.EventIndex == nil { + if len(m.filters) == 0 { return nil } @@ -360,12 +344,6 @@ func (m *EventFilterManager) Revert(ctx context.Context, from, to *types.TipSet) load: m.loadExecutedMessages, } - if m.EventIndex != nil { - if err := m.EventIndex.CollectEvents(ctx, tse, true, m.AddressResolver); err != nil { - return err - } - } - // TODO: could run this loop in parallel with errgroup if there are many filters for _, f := range m.filters { if err := f.CollectEvents(ctx, tse, true, m.AddressResolver); err != nil { @@ -386,7 +364,7 @@ func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight a currentHeight := m.currentHeight m.mu.Unlock() - if m.EventIndex == nil && minHeight != -1 && minHeight < currentHeight { + if m.ChainIndexer == nil && minHeight != -1 && minHeight < currentHeight { return nil, xerrors.Errorf("historic event index disabled") } @@ -405,11 +383,21 @@ func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight a maxResults: m.MaxFilterResults, } - if m.EventIndex != nil && minHeight != -1 && minHeight < currentHeight { - // Filter needs historic events - if err := m.EventIndex.prefillFilter(ctx, f, excludeReverted); err != nil { - return nil, err + if m.ChainIndexer != nil && minHeight != -1 && minHeight < currentHeight { + ef := &chainindex.EventFilter{ + MinHeight: minHeight, + MaxHeight: maxHeight, + TipsetCid: tipsetCid, + Addresses: addresses, + KeysWithCodec: keysWithCodec, + MaxResults: m.MaxFilterResults, } + ces, err := m.ChainIndexer.GetEventsForFilter(ctx, ef, excludeReverted) + if err != nil { + return nil, xerrors.Errorf("get events for filter: %w", err) + } + + f.setCollectedEvents(ces) } m.mu.Lock() diff --git a/chain/events/filter/event_test.go b/chain/events/filter/event_test.go index c650b71eb6f..7626be059f4 100644 --- a/chain/events/filter/event_test.go +++ b/chain/events/filter/event_test.go @@ -20,6 +20,7 @@ import ( "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chainindex" ) func keysToKeysWithCodec(keys map[string][][]byte) map[string][]types.ActorEventBlock { @@ -70,8 +71,8 @@ func TestEventFilterCollectEvents(t *testing.T) { cid14000, err := events14000.msgTs.Key().Cid() require.NoError(t, err, "tipset cid") - noCollectedEvents := []*CollectedEvent{} - oneCollectedEvent := []*CollectedEvent{ + noCollectedEvents := []*chainindex.CollectedEvent{} + oneCollectedEvent := []*chainindex.CollectedEvent{ { Entries: ev1.Entries, EmitterAddr: a1, @@ -88,7 +89,7 @@ func TestEventFilterCollectEvents(t *testing.T) { name string filter *eventFilter te *TipSetEvents - want []*CollectedEvent + want []*chainindex.CollectedEvent }{ { name: "nomatch tipset min height", diff --git a/chain/events/filter/index.go b/chain/events/filter/index.go deleted file mode 100644 index ff7f1aeaa7e..00000000000 --- a/chain/events/filter/index.go +++ /dev/null @@ -1,672 +0,0 @@ -package filter - -import ( - "context" - "database/sql" - "errors" - "fmt" - "sort" - "strings" - "sync" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - _ "github.com/mattn/go-sqlite3" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - 
"github.com/filecoin-project/lotus/lib/sqlite" -) - -const DefaultDbFilename = "events.db" - -// Any changes to this schema should be matched for the `lotus-shed indexes backfill-events` command - -var ddls = []string{ - `CREATE TABLE IF NOT EXISTS event ( - id INTEGER PRIMARY KEY, - height INTEGER NOT NULL, - tipset_key BLOB NOT NULL, - tipset_key_cid BLOB NOT NULL, - emitter_addr BLOB NOT NULL, - event_index INTEGER NOT NULL, - message_cid BLOB NOT NULL, - message_index INTEGER NOT NULL, - reverted INTEGER NOT NULL - )`, - - createIndexEventTipsetKeyCid, - createIndexEventHeight, - - `CREATE TABLE IF NOT EXISTS event_entry ( - event_id INTEGER, - indexed INTEGER NOT NULL, - flags BLOB NOT NULL, - key TEXT NOT NULL, - codec INTEGER, - value BLOB NOT NULL - )`, - - createTableEventsSeen, - - createIndexEventEntryEventId, - createIndexEventsSeenHeight, - createIndexEventsSeenTipsetKeyCid, -} - -var ( - log = logging.Logger("filter") -) - -const ( - createTableEventsSeen = `CREATE TABLE IF NOT EXISTS events_seen ( - id INTEGER PRIMARY KEY, - height INTEGER NOT NULL, - tipset_key_cid BLOB NOT NULL, - reverted INTEGER NOT NULL, - UNIQUE(height, tipset_key_cid) - )` - - // When modifying indexes in this file, it is critical to test the query plan (EXPLAIN QUERY PLAN) - // of all the variations of queries built by prefillFilter to ensure that the query first hits - // an index that narrows down results to an epoch or a reasonable range of epochs. Specifically, - // event_tipset_key_cid or event_height should be the first index. Then further narrowing can take - // place within the small subset of results. - // Unfortunately SQLite has some quirks in index selection that mean that certain query types will - // bypass these indexes if alternatives are available. This has been observed specifically on - // queries with height ranges: `height>=X AND height<=Y`. - // - // e.g. we want to see that `event_height` is the first index used in this query: - // - // EXPLAIN QUERY PLAN - // SELECT - // event.height, event.tipset_key_cid, event_entry.indexed, event_entry.codec, event_entry.key, event_entry.value - // FROM event - // JOIN - // event_entry ON event.id=event_entry.event_id, - // event_entry ee2 ON event.id=ee2.event_id - // WHERE event.height>=? AND event.height<=? AND event.reverted=? AND event.emitter_addr=? AND ee2.indexed=1 AND ee2.key=? - // ORDER BY event.height DESC, event_entry._rowid_ ASC - // - // -> - // - // QUERY PLAN - // |--SEARCH event USING INDEX event_height (height>? AND height 0 FROM events_seen WHERE tipset_key_cid=?`, // QUERY PLAN: SEARCH events_seen USING COVERING INDEX events_seen_tipset_key_cid (tipset_key_cid=?) - &ps.getMaxHeightInIndex: `SELECT MAX(height) FROM events_seen`, // QUERY PLAN: SEARCH events_seen USING COVERING INDEX events_seen_height - &ps.isHeightProcessed: `SELECT COUNT(*) > 0 FROM events_seen WHERE height=?`, // QUERY PLAN: SEARCH events_seen USING COVERING INDEX events_seen_height (height=?) 
- - } -} - -type preparedStatements struct { - insertEvent *sql.Stmt - insertEntry *sql.Stmt - revertEventsInTipset *sql.Stmt - restoreEvent *sql.Stmt - upsertEventsSeen *sql.Stmt - revertEventSeen *sql.Stmt - restoreEventSeen *sql.Stmt - eventExists *sql.Stmt - isTipsetProcessed *sql.Stmt - getMaxHeightInIndex *sql.Stmt - isHeightProcessed *sql.Stmt -} - -type EventIndex struct { - db *sql.DB - - stmt *preparedStatements - - mu sync.Mutex - subIdCounter uint64 - updateSubs map[uint64]*updateSub -} - -type updateSub struct { - ctx context.Context - ch chan EventIndexUpdated - cancel context.CancelFunc -} - -type EventIndexUpdated struct{} - -func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStore) (*EventIndex, error) { - db, _, err := sqlite.Open(path) - if err != nil { - return nil, xerrors.Errorf("failed to setup event index db: %w", err) - } - - err = sqlite.InitDb(ctx, "event index", db, ddls, []sqlite.MigrationFunc{ - migrationVersion2(db, chainStore), - migrationVersion3, - migrationVersion4, - migrationVersion5, - migrationVersion6, - migrationVersion7, - }) - if err != nil { - _ = db.Close() - return nil, xerrors.Errorf("failed to setup event index db: %w", err) - } - - eventIndex := EventIndex{ - db: db, - stmt: &preparedStatements{}, - } - - if err = eventIndex.initStatements(); err != nil { - _ = db.Close() - return nil, xerrors.Errorf("error preparing eventIndex database statements: %w", err) - } - - eventIndex.updateSubs = make(map[uint64]*updateSub) - - return &eventIndex, nil -} - -func (ei *EventIndex) initStatements() error { - stmtMapping := preparedStatementMapping(ei.stmt) - for stmtPointer, query := range stmtMapping { - var err error - *stmtPointer, err = ei.db.Prepare(query) - if err != nil { - return xerrors.Errorf("prepare statement [%s]: %w", query, err) - } - } - - return nil -} - -func (ei *EventIndex) Close() error { - if ei.db == nil { - return nil - } - return ei.db.Close() -} - -func (ei *EventIndex) SubscribeUpdates() (chan EventIndexUpdated, func()) { - subCtx, subCancel := context.WithCancel(context.Background()) - ch := make(chan EventIndexUpdated) - - tSub := &updateSub{ - ctx: subCtx, - cancel: subCancel, - ch: ch, - } - - ei.mu.Lock() - subId := ei.subIdCounter - ei.subIdCounter++ - ei.updateSubs[subId] = tSub - ei.mu.Unlock() - - unSubscribeF := func() { - ei.mu.Lock() - tSub, ok := ei.updateSubs[subId] - if !ok { - ei.mu.Unlock() - return - } - delete(ei.updateSubs, subId) - ei.mu.Unlock() - - // cancel the subscription - tSub.cancel() - } - - return tSub.ch, unSubscribeF -} - -func (ei *EventIndex) GetMaxHeightInIndex(ctx context.Context) (uint64, error) { - row := ei.stmt.getMaxHeightInIndex.QueryRowContext(ctx) - var maxHeight uint64 - err := row.Scan(&maxHeight) - return maxHeight, err -} - -func (ei *EventIndex) IsHeightPast(ctx context.Context, height uint64) (bool, error) { - maxHeight, err := ei.GetMaxHeightInIndex(ctx) - if err != nil { - return false, err - } - return height <= maxHeight, nil -} - -func (ei *EventIndex) IsTipsetProcessed(ctx context.Context, tipsetKeyCid []byte) (bool, error) { - row := ei.stmt.isTipsetProcessed.QueryRowContext(ctx, tipsetKeyCid) - var exists bool - err := row.Scan(&exists) - return exists, err -} - -func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, revert bool, resolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool)) error { - tx, err := ei.db.BeginTx(ctx, nil) - if err != nil { - return xerrors.Errorf("begin 
transaction: %w", err) - } - // rollback the transaction (a no-op if the transaction was already committed) - defer func() { _ = tx.Rollback() }() - - tsKeyCid, err := te.msgTs.Key().Cid() - if err != nil { - return xerrors.Errorf("tipset key cid: %w", err) - } - - // lets handle the revert case first, since its simpler and we can simply mark all events in this tipset as reverted and return - if revert { - _, err = tx.Stmt(ei.stmt.revertEventsInTipset).Exec(te.msgTs.Height(), te.msgTs.Key().Bytes()) - if err != nil { - return xerrors.Errorf("revert event: %w", err) - } - - _, err = tx.Stmt(ei.stmt.revertEventSeen).Exec(te.msgTs.Height(), tsKeyCid.Bytes()) - if err != nil { - return xerrors.Errorf("revert event seen: %w", err) - } - - err = tx.Commit() - if err != nil { - return xerrors.Errorf("commit transaction: %w", err) - } - - ei.mu.Lock() - tSubs := make([]*updateSub, 0, len(ei.updateSubs)) - for _, tSub := range ei.updateSubs { - tSubs = append(tSubs, tSub) - } - ei.mu.Unlock() - - for _, tSub := range tSubs { - tSub := tSub - select { - case tSub.ch <- EventIndexUpdated{}: - case <-tSub.ctx.Done(): - // subscription was cancelled, ignore - case <-ctx.Done(): - return ctx.Err() - } - } - - return nil - } - - // cache of lookups between actor id and f4 address - addressLookups := make(map[abi.ActorID]address.Address) - - ems, err := te.messages(ctx) - if err != nil { - return xerrors.Errorf("load executed messages: %w", err) - } - - eventCount := 0 - // iterate over all executed messages in this tipset and insert them into the database if they - // don't exist, otherwise mark them as not reverted - for msgIdx, em := range ems { - for _, ev := range em.Events() { - addr, found := addressLookups[ev.Emitter] - if !found { - var ok bool - addr, ok = resolver(ctx, ev.Emitter, te.rctTs) - if !ok { - // not an address we will be able to match against - continue - } - addressLookups[ev.Emitter] = addr - } - - // check if this event already exists in the database - var entryID sql.NullInt64 - err = tx.Stmt(ei.stmt.eventExists).QueryRow( - te.msgTs.Height(), // height - te.msgTs.Key().Bytes(), // tipset_key - tsKeyCid.Bytes(), // tipset_key_cid - addr.Bytes(), // emitter_addr - eventCount, // event_index - em.Message().Cid().Bytes(), // message_cid - msgIdx, // message_index - ).Scan(&entryID) - if err != nil { - return xerrors.Errorf("error checking if event exists: %w", err) - } - - if !entryID.Valid { - // event does not exist, lets insert it - res, err := tx.Stmt(ei.stmt.insertEvent).Exec( - te.msgTs.Height(), // height - te.msgTs.Key().Bytes(), // tipset_key - tsKeyCid.Bytes(), // tipset_key_cid - addr.Bytes(), // emitter_addr - eventCount, // event_index - em.Message().Cid().Bytes(), // message_cid - msgIdx, // message_index - false, // reverted - ) - if err != nil { - return xerrors.Errorf("exec insert event: %w", err) - } - - entryID.Int64, err = res.LastInsertId() - if err != nil { - return xerrors.Errorf("get last row id: %w", err) - } - - // insert all the entries for this event - for _, entry := range ev.Entries { - _, err = tx.Stmt(ei.stmt.insertEntry).Exec( - entryID.Int64, // event_id - isIndexedValue(entry.Flags), // indexed - []byte{entry.Flags}, // flags - entry.Key, // key - entry.Codec, // codec - entry.Value, // value - ) - if err != nil { - return xerrors.Errorf("exec insert entry: %w", err) - } - } - } else { - // event already exists, lets mark it as not reverted - res, err := tx.Stmt(ei.stmt.restoreEvent).Exec( - te.msgTs.Height(), // height - te.msgTs.Key().Bytes(), // 
tipset_key - tsKeyCid.Bytes(), // tipset_key_cid - addr.Bytes(), // emitter_addr - eventCount, // event_index - em.Message().Cid().Bytes(), // message_cid - msgIdx, // message_index - ) - if err != nil { - return xerrors.Errorf("exec restore event: %w", err) - } - - rowsAffected, err := res.RowsAffected() - if err != nil { - return xerrors.Errorf("error getting rows affected: %s", err) - } - - // this is a sanity check as we should only ever be updating one event - if rowsAffected != 1 { - log.Warnf("restored %d events but expected only one to exist", rowsAffected) - } - } - eventCount++ - } - } - - // this statement will mark the tipset as processed and will insert a new row if it doesn't exist - // or update the reverted field to false if it does - _, err = tx.Stmt(ei.stmt.upsertEventsSeen).Exec( - te.msgTs.Height(), - tsKeyCid.Bytes(), - ) - if err != nil { - return xerrors.Errorf("exec upsert events seen: %w", err) - } - - err = tx.Commit() - if err != nil { - return xerrors.Errorf("commit transaction: %w", err) - } - - ei.mu.Lock() - tSubs := make([]*updateSub, 0, len(ei.updateSubs)) - for _, tSub := range ei.updateSubs { - tSubs = append(tSubs, tSub) - } - ei.mu.Unlock() - - for _, tSub := range tSubs { - tSub := tSub - select { - case tSub.ch <- EventIndexUpdated{}: - case <-tSub.ctx.Done(): - // subscription was cancelled, ignore - case <-ctx.Done(): - return ctx.Err() - } - } - - return nil -} - -// prefillFilter fills a filter's collection of events from the historic index -func (ei *EventIndex) prefillFilter(ctx context.Context, f *eventFilter, excludeReverted bool) error { - values, query := makePrefillFilterQuery(f, excludeReverted) - - stmt, err := ei.db.Prepare(query) - if err != nil { - return xerrors.Errorf("prepare prefill query: %w", err) - } - defer func() { _ = stmt.Close() }() - - q, err := stmt.QueryContext(ctx, values...) 
- if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil - } - return xerrors.Errorf("exec prefill query: %w", err) - } - defer func() { _ = q.Close() }() - - var ces []*CollectedEvent - var currentID int64 = -1 - var ce *CollectedEvent - - for q.Next() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - var row struct { - id int64 - height uint64 - tipsetKey []byte - tipsetKeyCid []byte - emitterAddr []byte - eventIndex int - messageCid []byte - messageIndex int - reverted bool - flags []byte - key string - codec uint64 - value []byte - } - - if err := q.Scan( - &row.id, - &row.height, - &row.tipsetKey, - &row.tipsetKeyCid, - &row.emitterAddr, - &row.eventIndex, - &row.messageCid, - &row.messageIndex, - &row.reverted, - &row.flags, - &row.key, - &row.codec, - &row.value, - ); err != nil { - return xerrors.Errorf("read prefill row: %w", err) - } - - if row.id != currentID { - if ce != nil { - ces = append(ces, ce) - ce = nil - // Unfortunately we can't easily incorporate the max results limit into the query due to the - // unpredictable number of rows caused by joins - // Break here to stop collecting rows - if f.maxResults > 0 && len(ces) >= f.maxResults { - break - } - } - - currentID = row.id - ce = &CollectedEvent{ - EventIdx: row.eventIndex, - Reverted: row.reverted, - Height: abi.ChainEpoch(row.height), - MsgIdx: row.messageIndex, - } - - ce.EmitterAddr, err = address.NewFromBytes(row.emitterAddr) - if err != nil { - return xerrors.Errorf("parse emitter addr: %w", err) - } - - ce.TipSetKey, err = types.TipSetKeyFromBytes(row.tipsetKey) - if err != nil { - return xerrors.Errorf("parse tipsetkey: %w", err) - } - - ce.MsgCid, err = cid.Cast(row.messageCid) - if err != nil { - return xerrors.Errorf("parse message cid: %w", err) - } - } - - ce.Entries = append(ce.Entries, types.EventEntry{ - Flags: row.flags[0], - Key: row.key, - Codec: row.codec, - Value: row.value, - }) - } - - if ce != nil { - ces = append(ces, ce) - } - - if len(ces) == 0 { - return nil - } - - // collected event list is in inverted order since we selected only the most recent events - // sort it into height order - sort.Slice(ces, func(i, j int) bool { return ces[i].Height < ces[j].Height }) - f.setCollectedEvents(ces) - - return nil -} - -func makePrefillFilterQuery(f *eventFilter, excludeReverted bool) ([]any, string) { - clauses := []string{} - values := []any{} - joins := []string{} - - if f.tipsetCid != cid.Undef { - clauses = append(clauses, "event.tipset_key_cid=?") - values = append(values, f.tipsetCid.Bytes()) - } else { - if f.minHeight >= 0 && f.minHeight == f.maxHeight { - clauses = append(clauses, "event.height=?") - values = append(values, f.minHeight) - } else { - if f.maxHeight >= 0 && f.minHeight >= 0 { - clauses = append(clauses, "event.height BETWEEN ? 
AND ?") - values = append(values, f.minHeight, f.maxHeight) - } else if f.minHeight >= 0 { - clauses = append(clauses, "event.height >= ?") - values = append(values, f.minHeight) - } else if f.maxHeight >= 0 { - clauses = append(clauses, "event.height <= ?") - values = append(values, f.maxHeight) - } - } - } - - if excludeReverted { - clauses = append(clauses, "event.reverted=?") - values = append(values, false) - } - - if len(f.addresses) > 0 { - for _, addr := range f.addresses { - values = append(values, addr.Bytes()) - } - clauses = append(clauses, "event.emitter_addr IN ("+strings.Repeat("?,", len(f.addresses)-1)+"?)") - } - - if len(f.keysWithCodec) > 0 { - join := 0 - for key, vals := range f.keysWithCodec { - if len(vals) > 0 { - join++ - joinAlias := fmt.Sprintf("ee%d", join) - joins = append(joins, fmt.Sprintf("event_entry %s ON event.id=%[1]s.event_id", joinAlias)) - clauses = append(clauses, fmt.Sprintf("%s.indexed=1 AND %[1]s.key=?", joinAlias)) - values = append(values, key) - subclauses := make([]string, 0, len(vals)) - for _, val := range vals { - subclauses = append(subclauses, fmt.Sprintf("(%s.value=? AND %[1]s.codec=?)", joinAlias)) - values = append(values, val.Value, val.Codec) - } - clauses = append(clauses, "("+strings.Join(subclauses, " OR ")+")") - } - } - } - - s := `SELECT - event.id, - event.height, - event.tipset_key, - event.tipset_key_cid, - event.emitter_addr, - event.event_index, - event.message_cid, - event.message_index, - event.reverted, - event_entry.flags, - event_entry.key, - event_entry.codec, - event_entry.value - FROM event JOIN event_entry ON event.id=event_entry.event_id` - - if len(joins) > 0 { - s = s + ", " + strings.Join(joins, ", ") - } - - if len(clauses) > 0 { - s = s + " WHERE " + strings.Join(clauses, " AND ") - } - - // retain insertion order of event_entry rows with the implicit _rowid_ column - s += " ORDER BY event.height DESC, event_entry._rowid_ ASC" - return values, s -} diff --git a/chain/events/filter/index_migrations.go b/chain/events/filter/index_migrations.go deleted file mode 100644 index bf8fd2f943c..00000000000 --- a/chain/events/filter/index_migrations.go +++ /dev/null @@ -1,260 +0,0 @@ -package filter - -import ( - "context" - "database/sql" - "errors" - "fmt" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/lib/sqlite" -) - -func migrationVersion2(db *sql.DB, chainStore *store.ChainStore) sqlite.MigrationFunc { - return func(ctx context.Context, tx *sql.Tx) error { - // create some temporary indices to help speed up the migration - _, err := tx.ExecContext(ctx, "CREATE INDEX IF NOT EXISTS tmp_height_tipset_key_cid ON event (height,tipset_key_cid)") - if err != nil { - return xerrors.Errorf("create index tmp_height_tipset_key_cid: %w", err) - } - _, err = tx.ExecContext(ctx, "CREATE INDEX IF NOT EXISTS tmp_tipset_key_cid ON event (tipset_key_cid)") - if err != nil { - return xerrors.Errorf("create index tmp_tipset_key_cid: %w", err) - } - - stmtDeleteOffChainEvent, err := tx.PrepareContext(ctx, "DELETE FROM event WHERE tipset_key_cid!=? and height=?") - if err != nil { - return xerrors.Errorf("prepare stmtDeleteOffChainEvent: %w", err) - } - - stmtSelectEvent, err := tx.PrepareContext(ctx, "SELECT id FROM event WHERE tipset_key_cid=? 
ORDER BY message_index ASC, event_index ASC, id DESC LIMIT 1") - if err != nil { - return xerrors.Errorf("prepare stmtSelectEvent: %w", err) - } - - stmtDeleteEvent, err := tx.PrepareContext(ctx, "DELETE FROM event WHERE tipset_key_cid=? AND id= minHeight.Int64 { - if currTs.Height()%1000 == 0 { - log.Infof("Migrating height %d (remaining %d)", currTs.Height(), int64(currTs.Height())-minHeight.Int64) - } - - tsKey := currTs.Parents() - currTs, err = chainStore.GetTipSetFromKey(ctx, tsKey) - if err != nil { - return xerrors.Errorf("get tipset from key: %w", err) - } - log.Debugf("Migrating height %d", currTs.Height()) - - tsKeyCid, err := currTs.Key().Cid() - if err != nil { - return fmt.Errorf("tipset key cid: %w", err) - } - - // delete all events that are not in the canonical chain - _, err = stmtDeleteOffChainEvent.Exec(tsKeyCid.Bytes(), currTs.Height()) - if err != nil { - return xerrors.Errorf("delete off chain event: %w", err) - } - - // find the first eventId from the last time the tipset was applied - var eventId sql.NullInt64 - err = stmtSelectEvent.QueryRow(tsKeyCid.Bytes()).Scan(&eventId) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - continue - } - return xerrors.Errorf("select event: %w", err) - } - - // this tipset might not have any events which is ok - if !eventId.Valid { - continue - } - log.Debugf("Deleting all events with id < %d at height %d", eventId.Int64, currTs.Height()) - - res, err := stmtDeleteEvent.Exec(tsKeyCid.Bytes(), eventId.Int64) - if err != nil { - return xerrors.Errorf("delete event: %w", err) - } - - nrRowsAffected, err := res.RowsAffected() - if err != nil { - return xerrors.Errorf("rows affected: %w", err) - } - log.Debugf("deleted %d events from tipset %s", nrRowsAffected, tsKeyCid.String()) - } - - // delete all entries that have an event_id that doesn't exist (since we don't have a foreign - // key constraint that gives us cascading deletes) - res, err := tx.ExecContext(ctx, "DELETE FROM event_entry WHERE event_id NOT IN (SELECT id FROM event)") - if err != nil { - return xerrors.Errorf("delete event_entry: %w", err) - } - - nrRowsAffected, err := res.RowsAffected() - if err != nil { - return xerrors.Errorf("rows affected: %w", err) - } - log.Infof("Cleaned up %d entries that had deleted events\n", nrRowsAffected) - - // drop the temporary indices after the migration - _, err = tx.ExecContext(ctx, "DROP INDEX IF EXISTS tmp_tipset_key_cid") - if err != nil { - return xerrors.Errorf("drop index tmp_tipset_key_cid: %w", err) - } - _, err = tx.ExecContext(ctx, "DROP INDEX IF EXISTS tmp_height_tipset_key_cid") - if err != nil { - return xerrors.Errorf("drop index tmp_height_tipset_key_cid: %w", err) - } - - // original v2 migration introduced an index: - // CREATE INDEX IF NOT EXISTS height_tipset_key ON event (height,tipset_key) - // which has subsequently been removed in v4, so it's omitted here - - return nil - } -} - -// migrationVersion3 migrates the schema from version 2 to version 3 by creating two indices: -// 1) an index on the event.emitter_addr column, and 2) an index on the event_entry.key column. -// -// As of version 7, these indices have been removed as they were found to be a performance -// hindrance. This migration is now a no-op. -func migrationVersion3(ctx context.Context, tx *sql.Tx) error { - return nil -} - -// migrationVersion4 migrates the schema from version 3 to version 4 by adjusting indexes to match -// the query patterns of the event filter. -// -// First it drops indexes introduced in previous migrations: -// 1. 
the index on the event.height and event.tipset_key columns -// 2. the index on the event_entry.key column -// -// And then creating the following indices: -// 1. an index on the event.tipset_key_cid column -// 2. an index on the event.height column -// 3. an index on the event.reverted column (removed in version 7) -// 4. a composite index on the event_entry.indexed and event_entry.key columns (removed in version 7) -// 5. a composite index on the event_entry.codec and event_entry.value columns (removed in version 7) -// 6. an index on the event_entry.event_id column -// -// Indexes 3, 4, and 5 were removed in version 7 as they were found to be a performance hindrance so -// are omitted here. -func migrationVersion4(ctx context.Context, tx *sql.Tx) error { - for _, create := range []struct { - desc string - query string - }{ - {"drop index height_tipset_key", "DROP INDEX IF EXISTS height_tipset_key;"}, - {"drop index event_entry_key_index", "DROP INDEX IF EXISTS event_entry_key_index;"}, - {"create index event_tipset_key_cid", createIndexEventTipsetKeyCid}, - {"create index event_height", createIndexEventHeight}, - {"create index event_entry_event_id", createIndexEventEntryEventId}, - } { - if _, err := tx.ExecContext(ctx, create.query); err != nil { - return xerrors.Errorf("%s: %w", create.desc, err) - } - } - - return nil -} - -// migrationVersion5 migrates the schema from version 4 to version 5 by updating the event_index -// to be 0-indexed within a tipset. -func migrationVersion5(ctx context.Context, tx *sql.Tx) error { - stmtEventIndexUpdate, err := tx.PrepareContext(ctx, "UPDATE event SET event_index = (SELECT COUNT(*) FROM event e2 WHERE e2.tipset_key_cid = event.tipset_key_cid AND e2.id <= event.id) - 1") - if err != nil { - return xerrors.Errorf("prepare stmtEventIndexUpdate: %w", err) - } - - _, err = stmtEventIndexUpdate.ExecContext(ctx) - if err != nil { - return xerrors.Errorf("update event index: %w", err) - } - - return nil -} - -// migrationVersion6 migrates the schema from version 5 to version 6 by creating a new table -// events_seen that tracks the tipsets that have been seen by the event filter and populating it -// with the tipsets that have events in the event table. -func migrationVersion6(ctx context.Context, tx *sql.Tx) error { - stmtCreateTableEventsSeen, err := tx.PrepareContext(ctx, createTableEventsSeen) - if err != nil { - return xerrors.Errorf("prepare stmtCreateTableEventsSeen: %w", err) - } - _, err = stmtCreateTableEventsSeen.ExecContext(ctx) - if err != nil { - return xerrors.Errorf("create table events_seen: %w", err) - } - - _, err = tx.ExecContext(ctx, createIndexEventsSeenHeight) - if err != nil { - return xerrors.Errorf("create index events_seen_height: %w", err) - } - _, err = tx.ExecContext(ctx, createIndexEventsSeenTipsetKeyCid) - if err != nil { - return xerrors.Errorf("create index events_seen_tipset_key_cid: %w", err) - } - - // INSERT an entry in the events_seen table for all epochs we do have events for in our DB - _, err = tx.ExecContext(ctx, ` - INSERT OR IGNORE INTO events_seen (height, tipset_key_cid, reverted) - SELECT DISTINCT height, tipset_key_cid, reverted FROM event -`) - if err != nil { - return xerrors.Errorf("insert events into events_seen: %w", err) - } - - return nil -} - -// migrationVersion7 migrates the schema from version 6 to version 7 by dropping the following -// indices: -// 1. the index on the event.emitter_addr column -// 2. the index on the event.reverted column -// 3. 
the composite index on the event_entry.indexed and event_entry.key columns -// 4. the composite index on the event_entry.codec and event_entry.value columns -// -// These indices were found to be a performance hindrance as they prevent SQLite from using the -// intended initial indexes on height or tipset_key_cid in many query variations. Without additional -// indices to fall-back on, SQLite is forced to narrow down each query via height or tipset_key_cid -// which is the desired behavior. -func migrationVersion7(ctx context.Context, tx *sql.Tx) error { - for _, drop := range []struct { - desc string - query string - }{ - {"drop index event_emitter_addr", "DROP INDEX IF EXISTS event_emitter_addr;"}, - {"drop index event_reverted", "DROP INDEX IF EXISTS event_reverted;"}, - {"drop index event_entry_indexed_key", "DROP INDEX IF EXISTS event_entry_indexed_key;"}, - {"drop index event_entry_codec_value", "DROP INDEX IF EXISTS event_entry_codec_value;"}, - } { - if _, err := tx.ExecContext(ctx, drop.query); err != nil { - return xerrors.Errorf("%s: %w", drop.desc, err) - } - } - - return nil -} diff --git a/chain/events/filter/index_test.go b/chain/events/filter/index_test.go deleted file mode 100644 index 008b5697130..00000000000 --- a/chain/events/filter/index_test.go +++ /dev/null @@ -1,1046 +0,0 @@ -package filter - -import ( - "context" - pseudo "math/rand" - "os" - "path/filepath" - "regexp" - "strings" - "testing" - - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/types" -) - -func TestEventIndexPrefillFilter(t *testing.T) { - rng := pseudo.New(pseudo.NewSource(299792458)) - a1 := randomF4Addr(t, rng) - a2 := randomF4Addr(t, rng) - - a1ID := abi.ActorID(1) - a2ID := abi.ActorID(2) - - addrMap := addressMap{} - addrMap.add(a1ID, a1) - addrMap.add(a2ID, a2) - - ev1 := fakeEvent( - a1ID, - []kv{ - {k: "type", v: []byte("approval")}, - {k: "signer", v: []byte("addr1")}, - }, - []kv{ - {k: "amount", v: []byte("2988181")}, - }, - ) - - st := newStore() - events := []*types.Event{ev1} - em := executedMessage{ - msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)), - rct: fakeReceipt(t, rng, st, events), - evs: events, - } - - events14000 := buildTipSetEvents(t, rng, 14000, em) - cid14000, err := events14000.msgTs.Key().Cid() - require.NoError(t, err, "tipset cid") - - noCollectedEvents := []*CollectedEvent{} - oneCollectedEvent := []*CollectedEvent{ - { - Entries: ev1.Entries, - EmitterAddr: a1, - EventIdx: 0, - Reverted: false, - Height: 14000, - TipSetKey: events14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: em.msg.Cid(), - }, - } - - workDir, err := os.MkdirTemp("", "lotusevents") - require.NoError(t, err, "create temporary work directory") - - defer func() { - _ = os.RemoveAll(workDir) - }() - t.Logf("using work dir %q", workDir) - - dbPath := filepath.Join(workDir, "actorevents.db") - - ei, err := NewEventIndex(context.Background(), dbPath, nil) - require.NoError(t, err, "create event index") - - subCh, unSubscribe := ei.SubscribeUpdates() - defer unSubscribe() - - out := make(chan EventIndexUpdated, 1) - go func() { - tu := <-subCh - out <- tu - }() - - if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil { - require.NoError(t, err, "collect events") - } - - mh, err := ei.GetMaxHeightInIndex(context.Background()) - require.NoError(t, err) - require.Equal(t, uint64(14000), mh) - 
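A minimal consumer sketch of the SubscribeUpdates API exercised in the test above, assuming only the (chan EventIndexUpdated, func()) signature shown earlier in this file; the waitForIndexUpdate helper is hypothetical:

// waitForIndexUpdate blocks until the event index reports that it has ingested a
// new tipset, or until the caller's context is cancelled.
func waitForIndexUpdate(ctx context.Context, ei *EventIndex) error {
	ch, unsubscribe := ei.SubscribeUpdates()
	defer unsubscribe()

	select {
	case <-ch:
		return nil // index committed at least one tipset since we subscribed
	case <-ctx.Done():
		return ctx.Err()
	}
}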
- b, err := ei.IsHeightPast(context.Background(), 14000) - require.NoError(t, err) - require.True(t, b) - - b, err = ei.IsHeightPast(context.Background(), 14001) - require.NoError(t, err) - require.False(t, b) - - b, err = ei.IsHeightPast(context.Background(), 13000) - require.NoError(t, err) - require.True(t, b) - - tsKey := events14000.msgTs.Key() - tsKeyCid, err := tsKey.Cid() - require.NoError(t, err, "tipset key cid") - - seen, err := ei.IsTipsetProcessed(context.Background(), tsKeyCid.Bytes()) - require.NoError(t, err) - require.True(t, seen, "tipset key should be seen") - - seen, err = ei.IsTipsetProcessed(context.Background(), []byte{1}) - require.NoError(t, err) - require.False(t, seen, "tipset key should not be seen") - - _ = <-out - - testCases := []struct { - name string - filter *eventFilter - te *TipSetEvents - want []*CollectedEvent - }{ - { - name: "nomatch tipset min height", - filter: &eventFilter{ - minHeight: 14001, - maxHeight: -1, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch tipset max height", - filter: &eventFilter{ - minHeight: -1, - maxHeight: 13999, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match tipset min height", - filter: &eventFilter{ - minHeight: 14000, - maxHeight: -1, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match tipset cid", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: cid14000, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a2}, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a1}, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry with alternate values", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - []byte("approval"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch one entry by missing value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry by missing key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "method": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match one entry with multiple keys", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch one entry with one mismatching key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "approver": { - []byte("addr1"), - }, - }), - }, - te: 
events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one mismatching value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr2"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one unindexed key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "amount": { - []byte("2988181"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - } - - for _, tc := range testCases { - tc := tc // appease lint - t.Run(tc.name, func(t *testing.T) { - if err := ei.prefillFilter(context.Background(), tc.filter, false); err != nil { - require.NoError(t, err, "prefill filter events") - } - - coll := tc.filter.TakeCollectedEvents(context.Background()) - require.ElementsMatch(t, coll, tc.want) - }) - } -} - -func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { - rng := pseudo.New(pseudo.NewSource(299792458)) - a1 := randomF4Addr(t, rng) - a2 := randomF4Addr(t, rng) - a3 := randomF4Addr(t, rng) - - a1ID := abi.ActorID(1) - a2ID := abi.ActorID(2) - - addrMap := addressMap{} - addrMap.add(a1ID, a1) - addrMap.add(a2ID, a2) - - ev1 := fakeEvent( - a1ID, - []kv{ - {k: "type", v: []byte("approval")}, - {k: "signer", v: []byte("addr1")}, - }, - []kv{ - {k: "amount", v: []byte("2988181")}, - }, - ) - ev2 := fakeEvent( - a2ID, - []kv{ - {k: "type", v: []byte("approval")}, - {k: "signer", v: []byte("addr2")}, - }, - []kv{ - {k: "amount", v: []byte("2988182")}, - }, - ) - - st := newStore() - events := []*types.Event{ev1} - revertedEvents := []*types.Event{ev2} - em := executedMessage{ - msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)), - rct: fakeReceipt(t, rng, st, events), - evs: events, - } - revertedEm := executedMessage{ - msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)), - rct: fakeReceipt(t, rng, st, revertedEvents), - evs: revertedEvents, - } - - events14000 := buildTipSetEvents(t, rng, 14000, em) - revertedEvents14000 := buildTipSetEvents(t, rng, 14000, revertedEm) - cid14000, err := events14000.msgTs.Key().Cid() - require.NoError(t, err, "tipset cid") - reveredCID14000, err := revertedEvents14000.msgTs.Key().Cid() - require.NoError(t, err, "tipset cid") - - noCollectedEvents := []*CollectedEvent{} - oneCollectedEvent := []*CollectedEvent{ - { - Entries: ev1.Entries, - EmitterAddr: a1, - EventIdx: 0, - Reverted: false, - Height: 14000, - TipSetKey: events14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: em.msg.Cid(), - }, - } - twoCollectedEvent := []*CollectedEvent{ - { - Entries: ev1.Entries, - EmitterAddr: a1, - EventIdx: 0, - Reverted: false, - Height: 14000, - TipSetKey: events14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: em.msg.Cid(), - }, - { - Entries: ev2.Entries, - EmitterAddr: a2, - EventIdx: 0, - Reverted: true, - Height: 14000, - TipSetKey: revertedEvents14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: revertedEm.msg.Cid(), - }, - } - oneCollectedRevertedEvent := []*CollectedEvent{ - { - Entries: ev2.Entries, - EmitterAddr: a2, - EventIdx: 0, - Reverted: true, - Height: 14000, - TipSetKey: revertedEvents14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: revertedEm.msg.Cid(), - }, - } - - workDir, err := os.MkdirTemp("", "lotusevents") - require.NoError(t, err, "create temporary work directory") - - defer func() { - _ = os.RemoveAll(workDir) - }() - t.Logf("using work dir %q", workDir) - - dbPath := 
filepath.Join(workDir, "actorevents.db") - - ei, err := NewEventIndex(context.Background(), dbPath, nil) - require.NoError(t, err, "create event index") - - tCh := make(chan EventIndexUpdated, 3) - subCh, unSubscribe := ei.SubscribeUpdates() - defer unSubscribe() - go func() { - cnt := 0 - for tu := range subCh { - tCh <- tu - cnt++ - if cnt == 3 { - close(tCh) - return - } - } - }() - - if err := ei.CollectEvents(context.Background(), revertedEvents14000, false, addrMap.ResolveAddress); err != nil { - require.NoError(t, err, "collect reverted events") - } - if err := ei.CollectEvents(context.Background(), revertedEvents14000, true, addrMap.ResolveAddress); err != nil { - require.NoError(t, err, "revert reverted events") - } - if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil { - require.NoError(t, err, "collect events") - } - - _ = <-tCh - _ = <-tCh - _ = <-tCh - - inclusiveTestCases := []struct { - name string - filter *eventFilter - te *TipSetEvents - want []*CollectedEvent - }{ - { - name: "nomatch tipset min height", - filter: &eventFilter{ - minHeight: 14001, - maxHeight: -1, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch tipset max height", - filter: &eventFilter{ - minHeight: -1, - maxHeight: 13999, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match tipset min height", - filter: &eventFilter{ - minHeight: 14000, - maxHeight: -1, - }, - te: events14000, - want: twoCollectedEvent, - }, - { - name: "match tipset cid", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: cid14000, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match tipset cid", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: reveredCID14000, - }, - te: revertedEvents14000, - want: oneCollectedRevertedEvent, - }, - { - name: "nomatch address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a3}, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match address 2", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a2}, - }, - te: revertedEvents14000, - want: oneCollectedRevertedEvent, - }, - { - name: "match address 1", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a1}, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: twoCollectedEvent, - }, - { - name: "match one entry with alternate values", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - []byte("approval"), - }, - }), - }, - te: events14000, - want: twoCollectedEvent, - }, - { - name: "nomatch one entry by missing value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry by missing key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "method": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - 
}, - { - name: "match one entry with multiple keys", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry with multiple keys", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr2"), - }, - }), - }, - te: revertedEvents14000, - want: oneCollectedRevertedEvent, - }, - { - name: "nomatch one entry with one mismatching key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "approver": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one mismatching value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr3"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one unindexed key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "amount": { - []byte("2988181"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one unindexed key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "amount": { - []byte("2988182"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - } - - exclusiveTestCases := []struct { - name string - filter *eventFilter - te *TipSetEvents - want []*CollectedEvent - }{ - { - name: "nomatch tipset min height", - filter: &eventFilter{ - minHeight: 14001, - maxHeight: -1, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch tipset max height", - filter: &eventFilter{ - minHeight: -1, - maxHeight: 13999, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match tipset min height", - filter: &eventFilter{ - minHeight: 14000, - maxHeight: -1, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match tipset cid", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: cid14000, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match tipset cid but reverted", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: reveredCID14000, - }, - te: revertedEvents14000, - want: noCollectedEvents, - }, - { - name: "nomatch address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a3}, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch address 2 but reverted", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a2}, - }, - te: revertedEvents14000, - want: noCollectedEvents, - }, - { - name: "match address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a1}, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - }), - }, - te: events14000, - 
want: oneCollectedEvent, - }, - { - name: "match one entry with alternate values", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - []byte("approval"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch one entry by missing value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry by missing key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "method": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match one entry with multiple keys", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch one entry with one mismatching key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "approver": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with matching reverted value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr2"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one mismatching value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr3"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one unindexed key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "amount": { - []byte("2988181"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - } - - for _, tc := range inclusiveTestCases { - tc := tc // appease lint - t.Run(tc.name, func(t *testing.T) { - if err := ei.prefillFilter(context.Background(), tc.filter, false); err != nil { - require.NoError(t, err, "prefill filter events") - } - - coll := tc.filter.TakeCollectedEvents(context.Background()) - require.ElementsMatch(t, coll, tc.want, tc.name) - }) - } - - for _, tc := range exclusiveTestCases { - tc := tc // appease lint - t.Run(tc.name, func(t *testing.T) { - if err := ei.prefillFilter(context.Background(), tc.filter, true); err != nil { - require.NoError(t, err, "prefill filter events") - } - - coll := tc.filter.TakeCollectedEvents(context.Background()) - require.ElementsMatch(t, coll, tc.want, tc.name) - }) - } -} - -// TestQueryPlan is to ensure that future modifications to the db schema, or future upgrades to -// sqlite, do not change the query plan of the prepared statements used by the event index such that -// queries hit undesirable indexes which are likely to slow down the query. 
-// Changes that break this test need to be sure that the query plan is still efficient for the -// expected query patterns. -func TestQueryPlan(t *testing.T) { - ei, err := NewEventIndex(context.Background(), filepath.Join(t.TempDir(), "actorevents.db"), nil) - require.NoError(t, err, "create event index") - - verifyQueryPlan := func(stmt string) { - rows, err := ei.db.Query("EXPLAIN QUERY PLAN " + strings.Replace(stmt, "?", "1", -1)) - require.NoError(t, err, "explain query plan for query: "+stmt) - defer func() { - require.NoError(t, rows.Close()) - }() - // First response to EXPLAIN QUERY PLAN should show us the use of an index that we want to - // encounter first to narrow down the search space - either a height or tipset_key_cid index - // - sqlite_autoindex_events_seen_1 is for the UNIQUE constraint on events_seen - // - events_seen_height and events_seen_tipset_key_cid are explicit indexes on events_seen - // - event_height and event_tipset_key_cid are explicit indexes on event - rows.Next() - var id, parent, notused, detail string - require.NoError(t, rows.Scan(&id, &parent, ¬used, &detail), "scan explain query plan for query: "+stmt) - detail = strings.TrimSpace(detail) - var expectedIndexes = []string{ - "sqlite_autoindex_events_seen_1", - "events_seen_height", - "events_seen_tipset_key_cid", - "event_height", - "event_tipset_key_cid", - } - indexUsed := false - for _, index := range expectedIndexes { - if strings.Contains(detail, " INDEX "+index) { - indexUsed = true - break - } - } - require.True(t, indexUsed, "index used for query: "+stmt+" detail: "+detail) - - stmt = regexp.MustCompile(`(?m)^\s+`).ReplaceAllString(stmt, " ") // remove all leading whitespace from the statement - stmt = strings.Replace(stmt, "\n", "", -1) // remove all newlines from the statement - t.Logf("[%s] has plan start: %s", stmt, detail) - } - - // Test the hard-coded select and update queries - stmtMap := preparedStatementMapping(&preparedStatements{}) - for _, stmt := range stmtMap { - if strings.HasPrefix(strings.TrimSpace(strings.ToLower(stmt)), "insert") { - continue - } - verifyQueryPlan(stmt) - } - - // Test the dynamic prefillFilter queries - prefillCases := []*eventFilter{ - {}, - {minHeight: 14000, maxHeight: 14000}, - {minHeight: 14000, maxHeight: 15000}, - {tipsetCid: cid.MustParse("bafkqaaa")}, - {minHeight: 14000, maxHeight: 14000, addresses: []address.Address{address.TestAddress}}, - {minHeight: 14000, maxHeight: 15000, addresses: []address.Address{address.TestAddress}}, - {tipsetCid: cid.MustParse("bafkqaaa"), addresses: []address.Address{address.TestAddress}}, - {minHeight: 14000, maxHeight: 14000, addresses: []address.Address{address.TestAddress, address.TestAddress}}, - {minHeight: 14000, maxHeight: 15000, addresses: []address.Address{address.TestAddress, address.TestAddress}}, - {tipsetCid: cid.MustParse("bafkqaaa"), addresses: []address.Address{address.TestAddress, address.TestAddress}}, - {minHeight: 14000, maxHeight: 14000, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}})}, - {minHeight: 14000, maxHeight: 15000, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}})}, - {tipsetCid: cid.MustParse("bafkqaaa"), keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}})}, - {minHeight: 14000, maxHeight: 14000, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {minHeight: 14000, maxHeight: 15000, keysWithCodec: 
keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {tipsetCid: cid.MustParse("bafkqaaa"), keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {minHeight: 14000, maxHeight: 14000, addresses: []address.Address{address.TestAddress, address.TestAddress}, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {minHeight: 14000, maxHeight: 15000, addresses: []address.Address{address.TestAddress, address.TestAddress}, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {tipsetCid: cid.MustParse("bafkqaaa"), addresses: []address.Address{address.TestAddress, address.TestAddress}, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - } - for _, filter := range prefillCases { - _, query := makePrefillFilterQuery(filter, true) - verifyQueryPlan(query) - _, query = makePrefillFilterQuery(filter, false) - verifyQueryPlan(query) - } -} diff --git a/chain/events/observer.go b/chain/events/observer.go index 541875c68f9..56dc1dfd897 100644 --- a/chain/events/observer.go +++ b/chain/events/observer.go @@ -249,7 +249,7 @@ func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) err // The returned handle function should be called by the client when it's ready to receive updates. // // This function should only be called by the client after the observer has been started. -// Note that the Observer will block all clients from recieving tipset updates until the handle is called. +// Note that the Observer will block all clients from receiving tipset updates until the handle is called. 
func (o *observer) ObserveAndBlock(obs TipSetObserver) (*types.TipSet, func()) { o.lk.Lock() o.observers = append(o.observers, obs) diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 4ace34456c3..00ea779887b 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -35,7 +35,6 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/proofs" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/rand" @@ -44,7 +43,6 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" "github.com/filecoin-project/lotus/genesis" "github.com/filecoin-project/lotus/journal" @@ -260,7 +258,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS //} sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds, - index.DummyMsgIndex, chainindex.DummyIndexer) + nil) if err != nil { return nil, xerrors.Errorf("initing stmgr: %w", err) } diff --git a/chain/index/interface.go b/chain/index/interface.go deleted file mode 100644 index ceb4cca7bc9..00000000000 --- a/chain/index/interface.go +++ /dev/null @@ -1,35 +0,0 @@ -package index - -import ( - "context" - "errors" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/lotus/chainindex" -) - -var ErrNotFound = errors.New("message not found") -var ErrClosed = errors.New("index closed") - -// MsgIndex is the interface to the message index -type MsgIndex interface { - // GetMsgInfo retrieves the message metadata through the index. - // The lookup is done using the onchain message Cid; that is the signed message Cid - // for SECP messages and unsigned message Cid for BLS messages. - GetMsgInfo(ctx context.Context, m cid.Cid) (*chainindex.MsgInfo, error) - // Close closes the index - Close() error -} - -type dummyMsgIndex struct{} - -func (dummyMsgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (*chainindex.MsgInfo, error) { - return nil, ErrNotFound -} - -func (dummyMsgIndex) Close() error { - return nil -} - -var DummyMsgIndex MsgIndex = dummyMsgIndex{} diff --git a/chain/index/msgindex.go b/chain/index/msgindex.go deleted file mode 100644 index b3059656853..00000000000 --- a/chain/index/msgindex.go +++ /dev/null @@ -1,502 +0,0 @@ -package index - -import ( - "context" - "database/sql" - "os" - "sync" - "time" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - _ "github.com/mattn/go-sqlite3" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chainindex" - "github.com/filecoin-project/lotus/lib/sqlite" -) - -const DefaultDbFilename = "msgindex.db" - -var log = logging.Logger("msgindex") - -var ddls = []string{ - `CREATE TABLE IF NOT EXISTS messages ( - cid VARCHAR(80) PRIMARY KEY ON CONFLICT REPLACE, - tipset_cid VARCHAR(80) NOT NULL, - epoch INTEGER NOT NULL - )`, - `CREATE INDEX IF NOT EXISTS tipset_cids ON messages (tipset_cid)`, -} - -const ( - // prepared stmts - dbqGetMessageInfo = "SELECT tipset_cid, epoch FROM messages WHERE cid = ?" 
- dbqInsertMessage = "INSERT INTO messages VALUES (?, ?, ?)" - dbqDeleteTipsetMessages = "DELETE FROM messages WHERE tipset_cid = ?" - // reconciliation - dbqCountMessages = "SELECT COUNT(*) FROM messages" - dbqMinEpoch = "SELECT MIN(epoch) FROM messages" - dbqCountTipsetMessages = "SELECT COUNT(*) FROM messages WHERE tipset_cid = ?" - dbqDeleteMessagesByEpoch = "DELETE FROM messages WHERE epoch >= ?" -) - -// coalescer configuration (TODO: use observer instead) -// these are exposed to make tests snappy -var ( - CoalesceMinDelay = time.Second - CoalesceMaxDelay = 15 * time.Second - CoalesceMergeInterval = time.Second -) - -// ChainStore interface; we could use store.ChainStore directly, -// but this simplifies unit testing. -type ChainStore interface { - SubscribeHeadChanges(f store.ReorgNotifee) - MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) - GetHeaviestTipSet() *types.TipSet - GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) -} - -var _ ChainStore = (*store.ChainStore)(nil) - -type msgIndex struct { - cs ChainStore - - db *sql.DB - selectMsgStmt *sql.Stmt - insertMsgStmt *sql.Stmt - deleteTipSetStmt *sql.Stmt - - sema chan struct{} - mx sync.Mutex - pend []headChange - - cancel func() - workers sync.WaitGroup - closeLk sync.RWMutex - closed bool -} - -var _ MsgIndex = (*msgIndex)(nil) - -type headChange struct { - rev []*types.TipSet - app []*types.TipSet -} - -func NewMsgIndex(lctx context.Context, path string, cs ChainStore, enableWrites bool) (MsgIndex, error) { - db, exists, err := sqlite.Open(path) - if err != nil { - return nil, xerrors.Errorf("failed to setup message index db: %w", err) - } - - if err = sqlite.InitDb(lctx, "message index", db, ddls, []sqlite.MigrationFunc{}); err != nil { - _ = db.Close() - return nil, xerrors.Errorf("failed to init message index db: %w", err) - } - - // TODO we may consider populating the index when first creating the db - if exists { - if err := reconcileIndex(db, cs); err != nil { - return nil, xerrors.Errorf("error reconciling msgindex database: %w", err) - } - } - - ctx, cancel := context.WithCancel(lctx) - - msgIndex := &msgIndex{ - db: db, - cs: cs, - sema: make(chan struct{}, 1), - cancel: cancel, - } - - err = msgIndex.prepareStatements() - if err != nil { - if err := db.Close(); err != nil { - log.Errorf("error closing msgindex database: %s", err) - } - - return nil, xerrors.Errorf("error preparing msgindex database statements: %w", err) - } - - if enableWrites { - rnf := store.WrapHeadChangeCoalescer( - msgIndex.onHeadChange, - CoalesceMinDelay, - CoalesceMaxDelay, - CoalesceMergeInterval, - ) - cs.SubscribeHeadChanges(rnf) - } - - msgIndex.workers.Add(1) - go msgIndex.background(ctx) - - return msgIndex, nil -} - -func PopulateAfterSnapshot(lctx context.Context, path string, cs ChainStore) error { - // if a database already exists, we try to delete it and create a new one - if _, err := os.Stat(path); err == nil { - if err = os.Remove(path); err != nil { - return xerrors.Errorf("msgindex already exists at %s and can't be deleted", path) - } - } - - db, _, err := sqlite.Open(path) - if err != nil { - return xerrors.Errorf("failed to setup message index db: %w", err) - } - defer func() { - if err := db.Close(); err != nil { - log.Errorf("error closing msgindex database: %s", err) - } - }() - - if err := sqlite.InitDb(lctx, "message index", db, ddls, []sqlite.MigrationFunc{}); err != nil { - _ = db.Close() - return xerrors.Errorf("error creating msgindex database: 
%w", err) - } - - tx, err := db.Begin() - if err != nil { - return xerrors.Errorf("error when starting transaction: %w", err) - } - - rollback := func() { - if err := tx.Rollback(); err != nil { - log.Errorf("error in rollback: %s", err) - } - } - - insertStmt, err := tx.Prepare(dbqInsertMessage) - if err != nil { - rollback() - return xerrors.Errorf("error preparing insertStmt: %w", err) - } - - curTs := cs.GetHeaviestTipSet() - startHeight := curTs.Height() - for curTs != nil { - tscid, err := curTs.Key().Cid() - if err != nil { - rollback() - return xerrors.Errorf("error computing tipset cid: %w", err) - } - - tskey := tscid.String() - epoch := int64(curTs.Height()) - - msgs, err := cs.MessagesForTipset(lctx, curTs) - if err != nil { - log.Infof("stopping import after %d tipsets", startHeight-curTs.Height()) - break - } - - for _, msg := range msgs { - key := msg.Cid().String() - if _, err := insertStmt.Exec(key, tskey, epoch); err != nil { - rollback() - return xerrors.Errorf("error inserting message: %w", err) - } - } - - curTs, err = cs.GetTipSetFromKey(lctx, curTs.Parents()) - if err != nil { - rollback() - return xerrors.Errorf("error walking chain: %w", err) - } - } - - err = tx.Commit() - if err != nil { - return xerrors.Errorf("error committing transaction: %w", err) - } - - return nil -} - -func reconcileIndex(db *sql.DB, cs ChainStore) error { - // Invariant: after reconciliation, every tipset in the index is in the current chain; ie either - // the chain head or reachable by walking the chain. - // Algorithm: - // 1. Count messages in index; if none, trivially reconciled. - // TODO we may consider populating the index in that case - // 2. Find the minimum tipset in the index; this will mark the end of the reconciliation walk - // 3. Walk from current tipset until we find a tipset in the index. - // 4. Delete (revert!) all tipsets above the found tipset. - // 5. If the walk ends in the boundary epoch, then delete everything. - // - - row := db.QueryRow(dbqCountMessages) - - var result int64 - if err := row.Scan(&result); err != nil { - return xerrors.Errorf("error counting messages: %w", err) - } - - if result == 0 { - return nil - } - - row = db.QueryRow(dbqMinEpoch) - if err := row.Scan(&result); err != nil { - return xerrors.Errorf("error finding boundary epoch: %w", err) - } - - boundaryEpoch := abi.ChainEpoch(result) - - countMsgsStmt, err := db.Prepare(dbqCountTipsetMessages) - if err != nil { - return xerrors.Errorf("error preparing statement: %w", err) - } - - curTs := cs.GetHeaviestTipSet() - for curTs != nil && curTs.Height() >= boundaryEpoch { - tsCid, err := curTs.Key().Cid() - if err != nil { - return xerrors.Errorf("error computing tipset cid: %w", err) - } - - key := tsCid.String() - row = countMsgsStmt.QueryRow(key) - if err := row.Scan(&result); err != nil { - return xerrors.Errorf("error counting messages: %w", err) - } - - if result > 0 { - // found it! 
- boundaryEpoch = curTs.Height() + 1 - break - } - - // walk up - parents := curTs.Parents() - curTs, err = cs.GetTipSetFromKey(context.TODO(), parents) - if err != nil { - return xerrors.Errorf("error walking chain: %w", err) - } - } - - // delete everything above the minEpoch - if _, err = db.Exec(dbqDeleteMessagesByEpoch, int64(boundaryEpoch)); err != nil { - return xerrors.Errorf("error deleting stale reorged out message: %w", err) - } - - return nil -} - -func (x *msgIndex) prepareStatements() error { - stmt, err := x.db.Prepare(dbqGetMessageInfo) - if err != nil { - return xerrors.Errorf("prepare selectMsgStmt: %w", err) - } - x.selectMsgStmt = stmt - - stmt, err = x.db.Prepare(dbqInsertMessage) - if err != nil { - return xerrors.Errorf("prepare insertMsgStmt: %w", err) - } - x.insertMsgStmt = stmt - - stmt, err = x.db.Prepare(dbqDeleteTipsetMessages) - if err != nil { - return xerrors.Errorf("prepare deleteTipSetStmt: %w", err) - } - x.deleteTipSetStmt = stmt - - return nil -} - -// head change notifee -func (x *msgIndex) onHeadChange(rev, app []*types.TipSet) error { - x.closeLk.RLock() - defer x.closeLk.RUnlock() - - if x.closed { - return nil - } - - // do it in the background to avoid blocking head change processing - x.mx.Lock() - x.pend = append(x.pend, headChange{rev: rev, app: app}) - pendLen := len(x.pend) - x.mx.Unlock() - - // complain loudly if this is building backlog - if pendLen > 10 { - log.Warnf("message index head change processing is building backlog: %d pending head changes", pendLen) - } - - select { - case x.sema <- struct{}{}: - default: - } - - return nil -} - -func (x *msgIndex) background(ctx context.Context) { - defer x.workers.Done() - - for { - select { - case <-x.sema: - err := x.processHeadChanges(ctx) - if err != nil { - // we can't rely on an inconsistent index, so shut it down. 
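			// Once closed, subsequent lookups against this index return ErrClosed instead of
			// potentially stale or inconsistent results.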
- log.Errorf("error processing head change notifications: %s; shutting down message index", err) - if err2 := x.Close(); err2 != nil { - log.Errorf("error shutting down index: %s", err2) - } - } - - case <-ctx.Done(): - return - } - } -} - -func (x *msgIndex) processHeadChanges(ctx context.Context) error { - x.mx.Lock() - pend := x.pend - x.pend = nil - x.mx.Unlock() - - tx, err := x.db.Begin() - if err != nil { - return xerrors.Errorf("error creating transaction: %w", err) - } - - for _, hc := range pend { - for _, ts := range hc.rev { - if err := x.doRevert(ctx, tx, ts); err != nil { - if err2 := tx.Rollback(); err2 != nil { - log.Errorf("error rolling back transaction: %s", err2) - } - return xerrors.Errorf("error reverting %s: %w", ts, err) - } - } - - for _, ts := range hc.app { - if err := x.doApply(ctx, tx, ts); err != nil { - if err2 := tx.Rollback(); err2 != nil { - log.Errorf("error rolling back transaction: %s", err2) - } - return xerrors.Errorf("error applying %s: %w", ts, err) - } - } - } - - return tx.Commit() -} - -func (x *msgIndex) doRevert(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error { - tskey, err := ts.Key().Cid() - if err != nil { - return xerrors.Errorf("error computing tipset cid: %w", err) - } - - key := tskey.String() - _, err = tx.Stmt(x.deleteTipSetStmt).Exec(key) - return err -} - -func (x *msgIndex) doApply(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error { - tscid, err := ts.Key().Cid() - if err != nil { - return xerrors.Errorf("error computing tipset cid: %w", err) - } - - tskey := tscid.String() - epoch := int64(ts.Height()) - - msgs, err := x.cs.MessagesForTipset(ctx, ts) - if err != nil { - return xerrors.Errorf("error retrieving messages for tipset %s: %w", ts, err) - } - - insertStmt := tx.Stmt(x.insertMsgStmt) - for _, msg := range msgs { - key := msg.Cid().String() - if _, err := insertStmt.Exec(key, tskey, epoch); err != nil { - return xerrors.Errorf("error inserting message: %w", err) - } - } - - return nil -} - -// interface -func (x *msgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (*chainindex.MsgInfo, error) { - x.closeLk.RLock() - defer x.closeLk.RUnlock() - - if x.closed { - return nil, ErrClosed - } - - var ( - tipset string - epoch int64 - ) - - key := m.String() - row := x.selectMsgStmt.QueryRow(key) - err := row.Scan(&tipset, &epoch) - switch { - case err == sql.ErrNoRows: - return nil, ErrNotFound - - case err != nil: - return nil, xerrors.Errorf("error querying msgindex database: %w", err) - } - - tipsetCid, err := cid.Decode(tipset) - if err != nil { - return nil, xerrors.Errorf("error decoding tipset cid: %w", err) - } - - return &chainindex.MsgInfo{ - Message: m, - TipSet: tipsetCid, - Epoch: abi.ChainEpoch(epoch), - }, nil -} - -func (x *msgIndex) Close() error { - x.closeLk.Lock() - defer x.closeLk.Unlock() - - if x.closed { - return nil - } - - x.closed = true - - x.cancel() - x.workers.Wait() - - return x.db.Close() -} - -// informal apis for itests; not exposed in the main interface -func (x *msgIndex) CountMessages() (int64, error) { - x.closeLk.RLock() - defer x.closeLk.RUnlock() - - if x.closed { - return 0, ErrClosed - } - - var result int64 - row := x.db.QueryRow(dbqCountMessages) - err := row.Scan(&result) - return result, err -} diff --git a/chain/index/msgindex_test.go b/chain/index/msgindex_test.go deleted file mode 100644 index add888e2eb2..00000000000 --- a/chain/index/msgindex_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package index - -import ( - "context" - "errors" - "math/rand" - "os" - 
"testing" - "time" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/types/mock" -) - -func TestBasicMsgIndex(t *testing.T) { - // the most basic of tests: - // 1. Create an index with mock chain store - // 2. Advance the chain for a few tipsets - // 3. Verify that the index contains all messages with the correct tipset/epoch - cs := newMockChainStore() - cs.genesis() - - tmp := t.TempDir() - t.Cleanup(func() { _ = os.RemoveAll(tmp) }) - - msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs, true) - require.NoError(t, err) - - defer msgIndex.Close() //nolint - - for i := 0; i < 10; i++ { - t.Logf("advance to epoch %d", i+1) - err := cs.advance() - require.NoError(t, err) - } - - waitForCoalescerAfterLastEvent() - - t.Log("verifying index") - verifyIndex(t, cs, msgIndex) -} - -func TestReorgMsgIndex(t *testing.T) { - // slightly more nuanced test that includes reorgs - // 1. Create an index with mock chain store - // 2. Advance/Reorg the chain for a few tipsets - // 3. Verify that the index contains all messages with the correct tipset/epoch - cs := newMockChainStore() - cs.genesis() - - tmp := t.TempDir() - t.Cleanup(func() { _ = os.RemoveAll(tmp) }) - - msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs, true) - require.NoError(t, err) - - defer msgIndex.Close() //nolint - - for i := 0; i < 10; i++ { - t.Logf("advance to epoch %d", i+1) - err := cs.advance() - require.NoError(t, err) - } - - waitForCoalescerAfterLastEvent() - - // a simple reorg - t.Log("doing reorg") - reorgme := cs.curTs - reorgmeParent, err := cs.GetTipSetFromKey(context.Background(), reorgme.Parents()) - require.NoError(t, err) - cs.setHead(reorgmeParent) - reorgmeChild := cs.makeBlk() - err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild}) - require.NoError(t, err) - - waitForCoalescerAfterLastEvent() - - t.Log("verifying index") - verifyIndex(t, cs, msgIndex) - - t.Log("verifying that reorged messages are not present") - verifyMissing(t, cs, msgIndex, reorgme) -} - -func TestReconcileMsgIndex(t *testing.T) { - // test that exercises the reconciliation code paths - // 1. Create and populate a basic msgindex, similar to TestBasicMsgIndex. - // 2. Close it - // 3. Reorg the mock chain store - // 4. Reopen the index to trigger reconciliation - // 5. Enxure that only the stable messages remain. 
- cs := newMockChainStore() - cs.genesis() - - tmp := t.TempDir() - t.Cleanup(func() { _ = os.RemoveAll(tmp) }) - - msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs, true) - require.NoError(t, err) - - for i := 0; i < 10; i++ { - t.Logf("advance to epoch %d", i+1) - err := cs.advance() - require.NoError(t, err) - } - - waitForCoalescerAfterLastEvent() - - // Close it and reorg - err = msgIndex.Close() - require.NoError(t, err) - cs.notify = nil - - // a simple reorg - t.Log("doing reorg") - reorgme := cs.curTs - reorgmeParent, err := cs.GetTipSetFromKey(context.Background(), reorgme.Parents()) - require.NoError(t, err) - cs.setHead(reorgmeParent) - reorgmeChild := cs.makeBlk() - err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild}) - require.NoError(t, err) - - // reopen to reconcile - msgIndex, err = NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs, true) - require.NoError(t, err) - - defer msgIndex.Close() //nolint - - t.Log("verifying index") - // need to step one up because the last tipset is not known by the index - cs.setHead(reorgmeParent) - verifyIndex(t, cs, msgIndex) - - t.Log("verifying that reorged and unknown messages are not present") - verifyMissing(t, cs, msgIndex, reorgme, reorgmeChild) -} - -func verifyIndex(t *testing.T, cs *mockChainStore, msgIndex MsgIndex) { - for ts := cs.curTs; ts.Height() > 0; { - t.Logf("verify at height %d", ts.Height()) - blks := ts.Blocks() - if len(blks) == 0 { - break - } - - tsCid, err := ts.Key().Cid() - require.NoError(t, err) - - msgs, err := cs.MessagesForTipset(context.Background(), ts) - require.NoError(t, err) - for _, m := range msgs { - minfo, err := msgIndex.GetMsgInfo(context.Background(), m.Cid()) - require.NoError(t, err) - require.Equal(t, tsCid, minfo.TipSet) - require.Equal(t, ts.Height(), minfo.Epoch) - } - - parents := ts.Parents() - ts, err = cs.GetTipSetFromKey(context.Background(), parents) - require.NoError(t, err) - } -} - -func verifyMissing(t *testing.T, cs *mockChainStore, msgIndex MsgIndex, missing ...*types.TipSet) { - for _, ts := range missing { - msgs, err := cs.MessagesForTipset(context.Background(), ts) - require.NoError(t, err) - for _, m := range msgs { - _, err := msgIndex.GetMsgInfo(context.Background(), m.Cid()) - require.Equal(t, ErrNotFound, err) - } - } -} - -type mockChainStore struct { - notify store.ReorgNotifee - - curTs *types.TipSet - tipsets map[types.TipSetKey]*types.TipSet - msgs map[types.TipSetKey][]types.ChainMsg - - nonce uint64 -} - -var _ ChainStore = (*mockChainStore)(nil) - -var systemAddr address.Address -var rng *rand.Rand - -func init() { - systemAddr, _ = address.NewIDAddress(0) - rng = rand.New(rand.NewSource(314159)) - - // adjust those to make tests snappy - CoalesceMinDelay = 100 * time.Millisecond - CoalesceMaxDelay = time.Second - CoalesceMergeInterval = 100 * time.Millisecond -} - -func newMockChainStore() *mockChainStore { - return &mockChainStore{ - tipsets: make(map[types.TipSetKey]*types.TipSet), - msgs: make(map[types.TipSetKey][]types.ChainMsg), - } -} - -func (cs *mockChainStore) genesis() { - genBlock := mock.MkBlock(nil, 0, 0) - genTs := mock.TipSet(genBlock) - cs.msgs[genTs.Key()] = nil - cs.setHead(genTs) -} - -func (cs *mockChainStore) setHead(ts *types.TipSet) { - cs.curTs = ts - cs.tipsets[ts.Key()] = ts -} - -func (cs *mockChainStore) advance() error { - ts := cs.makeBlk() - return cs.reorg(nil, []*types.TipSet{ts}) -} - -func (cs *mockChainStore) reorg(rev, app []*types.TipSet) error { - for _, ts := range 
rev { - parents := ts.Parents() - cs.curTs = cs.tipsets[parents] - } - - for _, ts := range app { - cs.tipsets[ts.Key()] = ts - cs.curTs = ts - } - - if cs.notify != nil { - return cs.notify(rev, app) - } - - return nil -} - -func (cs *mockChainStore) makeBlk() *types.TipSet { - height := cs.curTs.Height() + 1 - - blk := mock.MkBlock(cs.curTs, uint64(height), uint64(height)) - blk.Messages = cs.makeGarbageCid() - - ts := mock.TipSet(blk) - msg1 := cs.makeMsg() - msg2 := cs.makeMsg() - cs.msgs[ts.Key()] = []types.ChainMsg{msg1, msg2} - - return ts -} - -func (cs *mockChainStore) makeMsg() *types.Message { - nonce := cs.nonce - cs.nonce++ - return &types.Message{To: systemAddr, From: systemAddr, Nonce: nonce} -} - -func (cs *mockChainStore) makeGarbageCid() cid.Cid { - garbage := blocks.NewBlock([]byte{byte(rng.Intn(256)), byte(rng.Intn(256)), byte(rng.Intn(256))}) - return garbage.Cid() -} - -func (cs *mockChainStore) SubscribeHeadChanges(f store.ReorgNotifee) { - cs.notify = f -} - -func (cs *mockChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { - msgs, ok := cs.msgs[ts.Key()] - if !ok { - return nil, errors.New("unknown tipset") - } - - return msgs, nil -} - -func (cs *mockChainStore) GetHeaviestTipSet() *types.TipSet { - return cs.curTs -} - -func (cs *mockChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { - ts, ok := cs.tipsets[tsk] - if !ok { - return nil, errors.New("unknown tipset") - } - return ts, nil -} - -func waitForCoalescerAfterLastEvent() { - // It can take up to CoalesceMinDelay for the coalescer timer to fire after the last event. - // When the timer fires, it can wait up to CoalesceMinDelay again for more events. - // Therefore the total wait is 2 * CoalesceMinDelay. - // Then we wait another second for the listener (the index) to actually process events. - time.Sleep(2*CoalesceMinDelay + time.Second) -} diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index 8f5b666ea15..1c9183731f3 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -36,12 +36,10 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/stmgr" . 
"github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chainindex" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" ) @@ -170,7 +168,7 @@ func TestForkHeightTriggers(t *testing.T) { } return st.Flush(ctx) - }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex, chainindex.DummyIndexer) + }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), nil) if err != nil { t.Fatal(err) } @@ -288,7 +286,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) { root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { migrationCount++ return root, nil - }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex, chainindex.DummyIndexer) + }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), nil) if err != nil { t.Fatal(err) } @@ -520,7 +518,7 @@ func TestForkPreMigration(t *testing.T) { return nil }, }}}, - }, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex, chainindex.DummyIndexer) + }, cg.BeaconSchedule(), datastore.NewMapDatastore(), nil) if err != nil { t.Fatal(err) } @@ -595,8 +593,7 @@ func TestDisablePreMigration(t *testing.T) { }, cg.BeaconSchedule(), datastore.NewMapDatastore(), - index.DummyMsgIndex, - chainindex.DummyIndexer, + nil, ) require.NoError(t, err) require.NoError(t, sm.Start(context.Background())) @@ -651,8 +648,7 @@ func TestMigrtionCache(t *testing.T) { }, cg.BeaconSchedule(), metadataDs, - index.DummyMsgIndex, - chainindex.DummyIndexer, + nil, ) require.NoError(t, err) require.NoError(t, sm.Start(context.Background())) @@ -705,8 +701,7 @@ func TestMigrtionCache(t *testing.T) { }, cg.BeaconSchedule(), metadataDs, - index.DummyMsgIndex, - chainindex.DummyIndexer, + nil, ) require.NoError(t, err) sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) { diff --git a/chain/stmgr/searchwait.go b/chain/stmgr/searchwait.go index e0db8adb11f..420c680b2d3 100644 --- a/chain/stmgr/searchwait.go +++ b/chain/stmgr/searchwait.go @@ -10,9 +10,9 @@ import ( "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chainindex" ) // WaitForMessage blocks until a message appears on chain. 
It looks backwards in the chain to see if this has already @@ -168,7 +168,7 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet log.Debugf("message %s not found", mcid) } - case errors.Is(err, index.ErrNotFound): + case errors.Is(err, chainindex.ErrNotFound): // ok for the index to have incomplete data default: @@ -190,13 +190,12 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet } func (sm *StateManager) searchForIndexedMsg(ctx context.Context, mcid cid.Cid, m types.ChainMsg) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { + if sm.chainIndexer == nil { + return nil, nil, cid.Undef, chainindex.ErrNotFound + } minfo, err := sm.chainIndexer.GetMsgInfo(ctx, mcid) if err != nil { - // If chainIndexer fails, fallback to msgIndex - minfo, err = sm.msgIndex.GetMsgInfo(ctx, mcid) - if err != nil { - return nil, nil, cid.Undef, xerrors.Errorf("error looking up message in indexes: %w", err) - } + return nil, nil, cid.Undef, xerrors.Errorf("error looking up message in indexes: %w", err) } // check the height against the current tipset; minimum execution confidence requires that the diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index 2abc943bc1b..cd376f6d573 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -28,7 +28,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/beacon" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" @@ -157,7 +156,6 @@ type StateManager struct { tsExecMonitor ExecMonitor beacon beacon.Schedule - msgIndex index.MsgIndex chainIndexer chainindex.Indexer // We keep a small cache for calls to ExecutionTrace which helps improve @@ -180,7 +178,7 @@ type tipSetCacheEntry struct { } func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, - metadataDs dstore.Batching, msgIndex index.MsgIndex, chainIndexer chainindex.Indexer) (*StateManager, error) { + metadataDs dstore.Batching, chainIndexer chainindex.Indexer) (*StateManager, error) { // If we have upgrades, make sure they're in-order and make sense. 
if err := us.Validate(); err != nil { return nil, err @@ -245,14 +243,13 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, tree: nil, }, compWait: make(map[string]chan struct{}), - msgIndex: msgIndex, chainIndexer: chainIndexer, execTraceCache: execTraceCache, }, nil } -func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching, msgIndex index.MsgIndex, chainIndexer chainindex.Indexer) (*StateManager, error) { - sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs, msgIndex, chainIndexer) +func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching, chainIndexer chainindex.Indexer) (*StateManager, error) { + sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs, chainIndexer) if err != nil { return nil, err } diff --git a/chain/store/store_test.go b/chain/store/store_test.go index c8458c61f18..4df171d2fa7 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -19,11 +19,9 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/node/repo" ) @@ -218,7 +216,7 @@ func TestChainExportImportFull(t *testing.T) { } sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), - ds, index.DummyMsgIndex, chainindex.DummyIndexer) + ds, nil) if err != nil { t.Fatal(err) } diff --git a/chainindex/ddls.go b/chainindex/ddls.go index 1ddb48bd69e..f75c003015d 100644 --- a/chainindex/ddls.go +++ b/chainindex/ddls.go @@ -28,6 +28,19 @@ const ( stmtMinNonRevertedHeight = `SELECT MIN(height) FROM tipset_message WHERE reverted = 0` stmtTipsetExistsNotReverted = `SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ? AND reverted = 0)` + + stmtEventsRevert = `UPDATE event SET reverted = 1 WHERE message_id IN ( + SELECT message_id FROM tipset_message WHERE tipset_key_cid = ? + )` + + stmtEventsUnRevert = `UPDATE event SET reverted = 0 WHERE message_id IN ( + SELECT message_id FROM tipset_message WHERE tipset_key_cid = ? + )` + + stmtGetMsgIdForMsgCidAndTipset = `SELECT message_id FROM tipset_message WHERE message_cid = ? 
AND tipset_key_cid = ?` + + stmtInsertEvent = "INSERT INTO event (message_id, event_index, emitter_addr, reverted) VALUES (?, ?, ?, ?)" + stmtInsertEventEntry = "INSERT INTO event_entry (event_id, indexed, flags, key, codec, value) VALUES (?, ?, ?, ?, ?, ?)" ) var ddls = []string{ @@ -47,9 +60,33 @@ var ddls = []string{ inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP )`, + `CREATE TABLE IF NOT EXISTS event ( + event_id INTEGER PRIMARY KEY, + message_id INTEGER NOT NULL, + event_index INTEGER NOT NULL, + emitter_addr BLOB NOT NULL, + reverted INTEGER NOT NULL, + FOREIGN KEY (message_id) REFERENCES tipset_message(message_id) ON DELETE CASCADE, + UNIQUE (message_id, event_index) + )`, + + `CREATE TABLE IF NOT EXISTS event_entry ( + event_id INTEGER NOT NULL, + indexed INTEGER NOT NULL, + flags BLOB NOT NULL, + key TEXT NOT NULL, + codec INTEGER, + value BLOB NOT NULL, + FOREIGN KEY (event_id) REFERENCES event(event_id) ON DELETE CASCADE + )`, + `CREATE INDEX IF NOT EXISTS insertion_time_index ON eth_tx_hash (inserted_at)`, `CREATE INDEX IF NOT EXISTS idx_message_cid ON tipset_message (message_cid)`, `CREATE INDEX IF NOT EXISTS idx_tipset_key_cid ON tipset_message (tipset_key_cid)`, + + `CREATE INDEX IF NOT EXISTS idx_event_message_id ON event (message_id)`, + + `CREATE INDEX IF NOT EXISTS idx_height ON tipset_message (height)`, } diff --git a/chainindex/events.go b/chainindex/events.go new file mode 100644 index 00000000000..629f7844f63 --- /dev/null +++ b/chainindex/events.go @@ -0,0 +1,446 @@ +package chainindex + +import ( + "bytes" + "context" + "database/sql" + "errors" + "fmt" + "math" + "sort" + "strings" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + amt4 "github.com/filecoin-project/go-amt-ipld/v4" + "github.com/filecoin-project/go-state-types/abi" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/lotus/chain/types" +) + +type executedMessage struct { + msg types.ChainMsg + rct types.MessageReceipt + // events extracted from receipt + evs []types.Event +} + +// events are indexed against their inclusion/message tipset when we get the corresponding execution tipset +func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *types.TipSet, executionTs *types.TipSet) error { + // check if we have an event indexed for any message in the `msgTs` tipset -> if so, there's nothig to do here + msgTsKeyCidBytes, err := toTipsetKeyCidBytes(msgTs) + if err != nil { + return xerrors.Errorf("error getting tipset key cid: %w", err) + } + + // if we've already indexed events for this tipset, mark them as unreverted and return + res, err := tx.Stmt(si.eventsUnRevertStmt).ExecContext(ctx, msgTsKeyCidBytes) + if err != nil { + return xerrors.Errorf("error unreverting events for tipset: %w", err) + } + rows, err := res.RowsAffected() + if err != nil { + return xerrors.Errorf("error unreverting events for tipset: %w", err) + } + if rows > 0 { + return nil + } + + ems, err := si.loadExecutedMessages(ctx, msgTs, executionTs) + if err != nil { + return xerrors.Errorf("error loading executed messages: %w", err) + } + eventCount := 0 + addressLookups := make(map[abi.ActorID]address.Address) + + for _, em := range ems { + msgCidBytes := em.msg.Cid().Bytes() + + // read message id for this message cid and tipset key cid + var messageID int64 + if err := tx.Stmt(si.getMsgIdForMsgCidAndTipsetStmt).QueryRow(msgCidBytes, 
msgTsKeyCidBytes).Scan(&messageID); err != nil { + return xerrors.Errorf("error getting message id for message cid and tipset key cid: %w", err) + } + if messageID == 0 { + return xerrors.Errorf("message id not found for message cid %s and tipset key cid %s", em.msg.Cid(), msgTs.Key()) + } + + // Insert events for this message + for _, event := range em.evs { + addr, found := addressLookups[event.Emitter] + if !found { + var ok bool + addr, ok = si.idToRobustAddrFunc(ctx, event.Emitter, executionTs) + if !ok { + // not an address we will be able to match against + continue + } + addressLookups[event.Emitter] = addr + } + + // Insert event into events table + eventResult, err := tx.Stmt(si.insertEventStmt).Exec(messageID, eventCount, addr.Bytes(), 0) + if err != nil { + return xerrors.Errorf("error inserting event: %w", err) + } + + // Get the event_id of the inserted event + eventID, err := eventResult.LastInsertId() + if err != nil { + return xerrors.Errorf("error getting last insert id for event: %w", err) + } + + // Insert event entries + for _, entry := range event.Entries { + _, err := tx.Stmt(si.insertEventEntryStmt).Exec( + eventID, + isIndexedValue(entry.Flags), + []byte{entry.Flags}, + entry.Key, + entry.Codec, + entry.Value, + ) + if err != nil { + return xerrors.Errorf("error inserting event entry: %w", err) + } + } + eventCount++ + } + } + + return nil +} + +func (si *SqliteIndexer) loadExecutedMessages(ctx context.Context, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + msgs, err := si.cs.MessagesForTipset(ctx, msgTs) + if err != nil { + return nil, xerrors.Errorf("error getting messages for tipset: %w", err) + } + + st := si.cs.ActorStore(ctx) + + receiptsArr, err := blockadt.AsArray(st, rctTs.Blocks()[0].ParentMessageReceipts) + if err != nil { + return nil, xerrors.Errorf("error loading message receipts array: %w", err) + } + + if uint64(len(msgs)) != receiptsArr.Length() { + return nil, xerrors.Errorf("mismatching message and receipt counts (%d msgs, %d rcts)", len(msgs), receiptsArr.Length()) + } + + ems := make([]executedMessage, len(msgs)) + + for i := 0; i < len(msgs); i++ { + ems[i].msg = msgs[i] + + var rct types.MessageReceipt + found, err := receiptsArr.Get(uint64(i), &rct) + if err != nil { + return nil, xerrors.Errorf("error loading receipt %d: %w", i, err) + } + if !found { + return nil, xerrors.Errorf("receipt %d not found", i) + } + ems[i].rct = rct + + // no events in the receipt + if rct.EventsRoot == nil { + continue + } + + eventsArr, err := amt4.LoadAMT(ctx, st, *rct.EventsRoot, amt4.UseTreeBitWidth(types.EventAMTBitwidth)) + if err != nil { + return nil, xerrors.Errorf("error loading events amt: %w", err) + } + + ems[i].evs = make([]types.Event, eventsArr.Len()) + var evt types.Event + err = eventsArr.ForEach(ctx, func(u uint64, deferred *cbg.Deferred) error { + if u > math.MaxInt { + return xerrors.Errorf("too many events") + } + if err := evt.UnmarshalCBOR(bytes.NewReader(deferred.Raw)); err != nil { + return err + } + + cpy := evt + ems[i].evs[int(u)] = cpy + return nil + }) + + if err != nil { + return nil, xerrors.Errorf("error iterating over events for message %d: %w", i, err) + } + + } + + return ems, nil +} + +// GetEventsForFilter returns matching events for the given filter +// prefillFilter fills a filter's collection of events from the historic index +// Returns nil, nil if the filter has no matching events +// Returns nil, ErrNotFound if the filter has no matching events and the tipset is not indexed +// Returns nil, err for all 
other errors +func (si *SqliteIndexer) GetEventsForFilter(ctx context.Context, f *EventFilter, excludeReverted bool) ([]*CollectedEvent, error) { + if err := si.sanityCheckFilter(ctx, f); err != nil { + return nil, xerrors.Errorf("event filter is invalid: %w", err) + } + + values, query := makePrefillFilterQuery(f, excludeReverted) + + stmt, err := si.db.Prepare(query) + if err != nil { + return nil, xerrors.Errorf("prepare prefill query: %w", err) + } + defer func() { _ = stmt.Close() }() + + q, err := stmt.QueryContext(ctx, values...) + if err == sql.ErrNoRows { + // wait for head to be indexed and retry + err = si.waitTillHeadIndexedAndApply(ctx, func() error { + q, err = stmt.QueryContext(ctx, values...) + return err + }) + } + + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + // if user is asking for a specific tipset, differentiate between no events for the tipset and the absence of the tipset itself + if f.TipsetCid != cid.Undef { + exists, err := si.isTipsetIndexed(ctx, f.TipsetCid.Bytes()) + if err != nil { + return nil, xerrors.Errorf("error checking if tipset exists: %w", err) + } + // we have the tipset indexed but simply dont have events for it i.e. no events matching the given filter + if exists { + return nil, nil + } + } + + // we don't have the tipset indexed + return nil, ErrNotFound + } + return nil, xerrors.Errorf("exec prefill query: %w", err) + } + defer func() { _ = q.Close() }() + + var ces []*CollectedEvent + var currentID int64 = -1 + var ce *CollectedEvent + + for q.Next() { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + var row struct { + id int64 + height uint64 + tipsetKeyCid []byte + emitterAddr []byte + eventIndex int + messageCid []byte + messageIndex int + reverted bool + flags []byte + key string + codec uint64 + value []byte + } + + if err := q.Scan( + &row.id, + &row.height, + &row.tipsetKeyCid, + &row.emitterAddr, + &row.eventIndex, + &row.messageCid, + &row.messageIndex, + &row.reverted, + &row.flags, + &row.key, + &row.codec, + &row.value, + ); err != nil { + return nil, xerrors.Errorf("read prefill row: %w", err) + } + + if row.id != currentID { + if ce != nil { + ces = append(ces, ce) + ce = nil + // Unfortunately we can't easily incorporate the max results limit into the query due to the + // unpredictable number of rows caused by joins + // Break here to stop collecting rows + if f.MaxResults > 0 && len(ces) >= f.MaxResults { + break + } + } + + currentID = row.id + ce = &CollectedEvent{ + EventIdx: row.eventIndex, + Reverted: row.reverted, + Height: abi.ChainEpoch(row.height), + MsgIdx: row.messageIndex, + } + + ce.EmitterAddr, err = address.NewFromBytes(row.emitterAddr) + if err != nil { + return nil, xerrors.Errorf("parse emitter addr: %w", err) + } + + tsKeyCid, err := cid.Cast(row.tipsetKeyCid) + if err != nil { + return nil, xerrors.Errorf("parse tipsetkey cid: %w", err) + } + + ts, err := si.cs.GetTipSetByCid(ctx, tsKeyCid) + if err != nil { + return nil, xerrors.Errorf("get tipset by cid: %w", err) + } + + ce.TipSetKey = ts.Key() + + ce.MsgCid, err = cid.Cast(row.messageCid) + if err != nil { + return nil, xerrors.Errorf("parse message cid: %w", err) + } + } + + ce.Entries = append(ce.Entries, types.EventEntry{ + Flags: row.flags[0], + Key: row.key, + Codec: row.codec, + Value: row.value, + }) + } + + if ce != nil { + ces = append(ces, ce) + } + + if len(ces) == 0 { + return nil, nil + } + + // collected event list is in inverted order since we selected only the most recent events + // sort it into 
height order + sort.Slice(ces, func(i, j int) bool { return ces[i].Height < ces[j].Height }) + + return ces, nil +} + +func (si *SqliteIndexer) sanityCheckFilter(ctx context.Context, f *EventFilter) error { + head := si.cs.GetHeaviestTipSet() + + if f.TipsetCid != cid.Undef { + ts, err := si.cs.GetTipSetByCid(ctx, f.TipsetCid) + if err != nil { + return xerrors.Errorf("error getting tipset by cid: %w", err) + } + if ts.Height() >= head.Height() { + return xerrors.New("cannot ask for events for a tipset >= head") + } + } + + if f.MinHeight >= head.Height() || f.MaxHeight >= head.Height() { + return xerrors.New("cannot ask for events for a tipset >= head") + } + + return nil +} + +func makePrefillFilterQuery(f *EventFilter, excludeReverted bool) ([]any, string) { + clauses := []string{} + values := []any{} + joins := []string{} + + if f.TipsetCid != cid.Undef { + clauses = append(clauses, "tm.tipset_key_cid=?") + values = append(values, f.TipsetCid.Bytes()) + } else { + if f.MinHeight >= 0 && f.MinHeight == f.MaxHeight { + clauses = append(clauses, "tm.height=?") + values = append(values, f.MinHeight) + } else { + if f.MaxHeight >= 0 && f.MinHeight >= 0 { + clauses = append(clauses, "tm.height BETWEEN ? AND ?") + values = append(values, f.MinHeight, f.MaxHeight) + } else if f.MinHeight >= 0 { + clauses = append(clauses, "tm.height >= ?") + values = append(values, f.MinHeight) + } else if f.MaxHeight >= 0 { + clauses = append(clauses, "tm.height <= ?") + values = append(values, f.MaxHeight) + } + } + } + + if excludeReverted { + clauses = append(clauses, "e.reverted=?") + values = append(values, false) + } + + if len(f.Addresses) > 0 { + for _, addr := range f.Addresses { + values = append(values, addr.Bytes()) + } + clauses = append(clauses, "e.emitter_addr IN ("+strings.Repeat("?,", len(f.Addresses)-1)+"?)") + } + + if len(f.KeysWithCodec) > 0 { + join := 0 + for key, vals := range f.KeysWithCodec { + if len(vals) > 0 { + join++ + joinAlias := fmt.Sprintf("ee%d", join) + joins = append(joins, fmt.Sprintf("event_entry %s ON e.event_id=%[1]s.event_id", joinAlias)) + clauses = append(clauses, fmt.Sprintf("%s.indexed=1 AND %[1]s.key=?", joinAlias)) + values = append(values, key) + subclauses := make([]string, 0, len(vals)) + for _, val := range vals { + subclauses = append(subclauses, fmt.Sprintf("(%s.value=? 
AND %[1]s.codec=?)", joinAlias)) + values = append(values, val.Value, val.Codec) + } + clauses = append(clauses, "("+strings.Join(subclauses, " OR ")+")") + } + } + } + + s := `SELECT + e.event_id, + tm.height, + tm.tipset_key_cid, + e.emitter_addr, + e.event_index, + tm.message_cid, + tm.message_index, + e.reverted, + ee.flags, + ee.key, + ee.codec, + ee.value + FROM event e + JOIN tipset_message tm ON e.message_id = tm.message_id + JOIN event_entry ee ON e.event_id = ee.event_id` + + if len(joins) > 0 { + s = s + ", " + strings.Join(joins, ", ") + } + + if len(clauses) > 0 { + s = s + " WHERE " + strings.Join(clauses, " AND ") + } + + // retain insertion order of event_entry rows + s += " ORDER BY tm.height DESC, ee.rowid ASC" + return values, s +} diff --git a/chainindex/helpers.go b/chainindex/helpers.go index d4b37cb08bd..15edf5e7d26 100644 --- a/chainindex/helpers.go +++ b/chainindex/helpers.go @@ -32,16 +32,17 @@ func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error startHeight := curTs.Height() for curTs != nil { - if err := si.indexTipset(ctx, tx, curTs); err != nil { - log.Infof("stopping import after %d tipsets", startHeight-curTs.Height()) - break + parentTs, err := cs.GetTipSetFromKey(ctx, curTs.Parents()) + if err != nil { + return xerrors.Errorf("error getting parent tipset: %w", err) } - curTs, err = cs.GetTipSetFromKey(ctx, curTs.Parents()) - if err != nil { + if err := si.indexTipset(ctx, tx, curTs, parentTs, false); err != nil { log.Infof("stopping import after %d tipsets", startHeight-curTs.Height()) break } + + curTs = parentTs } return nil @@ -106,3 +107,10 @@ func withTx(ctx context.Context, db *sql.DB, fn func(*sql.Tx) error) (err error) err = fn(tx) return } + +func isIndexedValue(b uint8) bool { + // currently we mark the full entry as indexed if either the key + // or the value are indexed; in the future we will need finer-grained + // management of indices + return b&(types.EventFlagIndexedKey|types.EventFlagIndexedValue) > 0 +} diff --git a/chainindex/indexer.go b/chainindex/indexer.go index b7f946b257b..b4ca0cdd51d 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -8,6 +8,7 @@ import ( "github.com/ipfs/go-cid" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" @@ -18,6 +19,9 @@ import ( var _ Indexer = (*SqliteIndexer)(nil) +// IdToRobustAddrFunc is a function type that resolves an actor ID to a robust address +type IdToRobustAddrFunc func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) + type SqliteIndexer struct { ctx context.Context cancel context.CancelFunc @@ -26,6 +30,8 @@ type SqliteIndexer struct { db *sql.DB cs ChainStore + idToRobustAddrFunc IdToRobustAddrFunc + insertEthTxHashStmt *sql.Stmt getNonRevertedMsgInfoStmt *sql.Stmt getMsgCidFromEthHashStmt *sql.Stmt @@ -41,6 +47,11 @@ type SqliteIndexer struct { countMessagesStmt *sql.Stmt minNonRevertedHeightStmt *sql.Stmt tipsetExistsNotRevertedStmt *sql.Stmt + revertEventsStmt *sql.Stmt + eventsUnRevertStmt *sql.Stmt + getMsgIdForMsgCidAndTipsetStmt *sql.Stmt + insertEventStmt *sql.Stmt + insertEventEntryStmt *sql.Stmt gcRetentionEpochs int64 @@ -91,6 +102,10 @@ func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64) (si * return si, nil } +func (si *SqliteIndexer) SetIdToRobustAddrFunc(idToRobustAddrFunc IdToRobustAddrFunc) { + si.idToRobustAddrFunc = idToRobustAddrFunc +} + // 
ReconcileWithChain ensures that the index is consistent with the current chain state. // It performs the following steps: // 1. Checks if the index is empty. If so, it returns immediately as there's nothing to reconcile. @@ -180,7 +195,11 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types // current chain head] for i := len(tipsetStack) - 1; i >= 0; i-- { curTs := tipsetStack[i] - if err := si.indexTipset(ctx, tx, curTs); err != nil { + parentTs, err := si.cs.GetTipSetFromKey(ctx, curTs.Parents()) + if err != nil { + return xerrors.Errorf("error getting parent tipset: %w", err) + } + if err := si.indexTipset(ctx, tx, curTs, parentTs, true); err != nil { return xerrors.Errorf("error indexing tipset: %w", err) } } @@ -277,6 +296,29 @@ func (si *SqliteIndexer) prepareStatements() error { if err != nil { return xerrors.Errorf("prepare %s: %w", "tipsetExistsNotRevertedStmt", err) } + si.eventsUnRevertStmt, err = si.db.Prepare(stmtEventsUnRevert) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "eventsUnRevertStmt", err) + } + + si.revertEventsStmt, err = si.db.Prepare(stmtEventsRevert) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "revertEventsStmt", err) + } + + si.getMsgIdForMsgCidAndTipsetStmt, err = si.db.Prepare(stmtGetMsgIdForMsgCidAndTipset) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "getMsgIdForMsgCidAndTipsetStmt", err) + } + + si.insertEventStmt, err = si.db.Prepare(stmtInsertEvent) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "insertEventStmt", err) + } + si.insertEventEntryStmt, err = si.db.Prepare(stmtInsertEventEntry) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "insertEventEntryStmt", err) + } return nil } @@ -343,13 +385,7 @@ func (si *SqliteIndexer) Apply(ctx context.Context, from, to *types.TipSet) erro // We're moving the chain ahead from the `from` tipset to the `to` tipset // Height(to) > Height(from) err := withTx(ctx, si.db, func(tx *sql.Tx) error { - // index the `to` tipset first as we only need to index the tipsets and messages for it - if err := si.indexTipset(ctx, tx, to); err != nil { - return xerrors.Errorf("error indexing tipset: %w", err) - } - - // index the `from` tipset just in case it's not indexed - if err := si.indexTipset(ctx, tx, from); err != nil { + if err := si.indexTipset(ctx, tx, to, from, true); err != nil { return xerrors.Errorf("error indexing tipset: %w", err) } @@ -380,14 +416,22 @@ func (si *SqliteIndexer) Revert(ctx context.Context, from, to *types.TipSet) err return xerrors.Errorf("error getting tipset key cid: %w", err) } + // Because of deferred execution in Filecoin, events at tipset T are reverted when a tipset T+1 is reverted. + // However, the tipet `T` itself is not reverted. + eventTsKeyCid, err := toTipsetKeyCidBytes(to) + if err != nil { + return xerrors.Errorf("error getting tipset key cid: %w", err) + } + err = withTx(ctx, si.db, func(tx *sql.Tx) error { if _, err := tx.Stmt(si.revertTipsetStmt).ExecContext(ctx, revertTsKeyCid); err != nil { - return xerrors.Errorf("error marking tipset as reverted: %w", err) + return xerrors.Errorf("error marking tipset %s as reverted: %w", revertTsKeyCid, err) } - // index the `to` tipset as it has now been applied -> simply for redundancy - if err := si.indexTipset(ctx, tx, to); err != nil { - return xerrors.Errorf("error indexing tipset: %w", err) + // events are indexed against the message inclusion tipset, not the message execution tipset. 
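	// For example (heights assumed for illustration): messages included in tipset T are
	// executed in tipset T+1, so their events are recorded against T; when T+1 is
	// reverted, T itself remains applied but the events recorded for T must be flagged
	// as reverted.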
+ // So we need to revert the events for the message inclusion tipset. + if _, err := tx.Stmt(si.revertEventsStmt).ExecContext(ctx, eventTsKeyCid); err != nil { + return xerrors.Errorf("error reverting events for tipset %s: %w", eventTsKeyCid, err) } return nil @@ -401,13 +445,19 @@ func (si *SqliteIndexer) Revert(ctx context.Context, from, to *types.TipSet) err return nil } -func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error { +func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types.TipSet, parentTs *types.TipSet, indexEvents bool) error { tsKeyCidBytes, err := toTipsetKeyCidBytes(ts) if err != nil { return xerrors.Errorf("error computing tipset cid: %w", err) } - restored, err := si.restoreTipsetIfExists(ctx, tx, tsKeyCidBytes) + parentsKeyCid, err := parentTs.Key().Cid() + if err != nil { + return xerrors.Errorf("error computing tipset parents cid: %w", err) + } + parentsKeyCidBytes := parentsKeyCid.Bytes() + + restored, err := si.restoreTipsetIfExists(ctx, tx, tsKeyCidBytes, parentsKeyCidBytes) if err != nil { return xerrors.Errorf("error restoring tipset: %w", err) } @@ -428,6 +478,12 @@ func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types. if _, err := insertTipsetMsgStmt.ExecContext(ctx, tsKeyCidBytes, height, 0, nil, -1); err != nil { return xerrors.Errorf("error inserting empty tipset: %w", err) } + + // we still need to index events for the parent tipset + if err := si.indexEvents(ctx, tx, parentTs, ts); err != nil { + return xerrors.Errorf("error indexing events: %w", err) + } + return nil } @@ -456,10 +512,19 @@ func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types. } } + if !indexEvents { + return nil + } + + // index events + if err := si.indexEvents(ctx, tx, parentTs, ts); err != nil { + return xerrors.Errorf("error indexing events: %w", err) + } + return nil } -func (si *SqliteIndexer) restoreTipsetIfExists(ctx context.Context, tx *sql.Tx, tsKeyCidBytes []byte) (bool, error) { +func (si *SqliteIndexer) restoreTipsetIfExists(ctx context.Context, tx *sql.Tx, tsKeyCidBytes []byte, parentsKeyCidBytes []byte) (bool, error) { // Check if the tipset already exists var exists bool if err := tx.Stmt(si.tipsetExistsStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { @@ -469,6 +534,12 @@ func (si *SqliteIndexer) restoreTipsetIfExists(ctx context.Context, tx *sql.Tx, if _, err := tx.Stmt(si.tipsetUnRevertStmt).ExecContext(ctx, tsKeyCidBytes); err != nil { return false, xerrors.Errorf("error restoring tipset: %w", err) } + + // also mark all the events in the parent as not reverted + if _, err := tx.Stmt(si.eventsUnRevertStmt).ExecContext(ctx, parentsKeyCidBytes); err != nil { + return false, xerrors.Errorf("error unreverting events: %w", err) + } + return true, nil } return false, nil diff --git a/chainindex/interface.go b/chainindex/interface.go index 80b1f1ecc75..0dcc44771a3 100644 --- a/chainindex/interface.go +++ b/chainindex/interface.go @@ -6,8 +6,10 @@ import ( "github.com/ipfs/go-cid" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" @@ -26,11 +28,33 @@ type MsgInfo struct { Epoch abi.ChainEpoch } +type CollectedEvent struct { + Entries []types.EventEntry + EmitterAddr address.Address // address 
of emitter + EventIdx int // index of the event within the list of emitted events in a given tipset + Reverted bool + Height abi.ChainEpoch + TipSetKey types.TipSetKey // tipset that contained the message + MsgIdx int // index of the message in the tipset + MsgCid cid.Cid // cid of message that produced event +} + +type EventFilter struct { + MinHeight abi.ChainEpoch // minimum epoch to apply filter or -1 if no minimum + MaxHeight abi.ChainEpoch // maximum epoch to apply filter or -1 if no maximum + TipsetCid cid.Cid + Addresses []address.Address // list of actor addresses that are extpected to emit the event + + KeysWithCodec map[string][]types.ActorEventBlock // map of key names to a list of alternate values that may match + MaxResults int // maximum number of results to collect, 0 is unlimited +} + type Indexer interface { ReconcileWithChain(ctx context.Context, currHead *types.TipSet) error IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, c cid.Cid) error + SetIdToRobustAddrFunc(idToRobustAddrFunc IdToRobustAddrFunc) Apply(ctx context.Context, from, to *types.TipSet) error Revert(ctx context.Context, from, to *types.TipSet) error @@ -38,6 +62,9 @@ type Indexer interface { GetCidFromHash(ctx context.Context, hash ethtypes.EthHash) (cid.Cid, error) // Returns (nil, ErrNotFound) if the message was not found GetMsgInfo(ctx context.Context, m cid.Cid) (*MsgInfo, error) + + GetEventsForFilter(ctx context.Context, f *EventFilter, excludeReverted bool) ([]*CollectedEvent, error) + Close() error } @@ -47,42 +74,7 @@ type ChainStore interface { GetTipSetByCid(ctx context.Context, tsKeyCid cid.Cid) (*types.TipSet, error) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) + ActorStore(ctx context.Context) adt.Store } var _ ChainStore = (*store.ChainStore)(nil) - -type dummyIndexer struct{} - -func (dummyIndexer) Close() error { - return nil -} - -func (dummyIndexer) IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error { - return nil -} - -func (dummyIndexer) IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, c cid.Cid) error { - return nil -} - -func (dummyIndexer) GetCidFromHash(ctx context.Context, hash ethtypes.EthHash) (cid.Cid, error) { - return cid.Undef, ErrNotFound -} - -func (dummyIndexer) GetMsgInfo(ctx context.Context, m cid.Cid) (*MsgInfo, error) { - return nil, ErrNotFound -} - -func (dummyIndexer) Apply(ctx context.Context, from, to *types.TipSet) error { - return nil -} - -func (dummyIndexer) Revert(ctx context.Context, from, to *types.TipSet) error { - return nil -} - -func (dummyIndexer) ReconcileWithChain(ctx context.Context, currHead *types.TipSet) error { - return nil -} - -var DummyIndexer Indexer = dummyIndexer{} diff --git a/chainindex/read.go b/chainindex/read.go index 7b35ed18a2d..27acb557784 100644 --- a/chainindex/read.go +++ b/chainindex/read.go @@ -3,7 +3,6 @@ package chainindex import ( "context" "database/sql" - "fmt" "time" "github.com/ipfs/go-cid" @@ -11,7 +10,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" ) @@ -19,31 +17,6 @@ var ( headIndexedWaitTimeout = 5 * time.Second ) -func (si *SqliteIndexer) GetMaxNonRevertedTipset(ctx context.Context) (*types.TipSet, error) { - si.closeLk.RLock() - if 
si.closed { - return nil, ErrClosed - } - si.closeLk.RUnlock() - - var tipsetKeyCidBytes []byte - err := si.getMaxNonRevertedTipsetStmt.QueryRowContext(ctx).Scan(&tipsetKeyCidBytes) - if err != nil { - if err == sql.ErrNoRows { - return nil, nil - } - return nil, xerrors.Errorf("failed to get max non reverted tipset: %w", err) - } - - tipsetKeyCid, err := cid.Cast(tipsetKeyCidBytes) - if err != nil { - return nil, xerrors.Errorf("failed to cast tipset key cid: %w", err) - } - - // Can this error out for reverted tipsets ? - return si.cs.GetTipSetByCid(ctx, tipsetKeyCid) -} - func (si *SqliteIndexer) GetCidFromHash(ctx context.Context, txHash ethtypes.EthHash) (cid.Cid, error) { si.closeLk.RLock() if si.closed { @@ -72,8 +45,6 @@ func (si *SqliteIndexer) GetCidFromHash(ctx context.Context, txHash ethtypes.Eth return cid.Undef, xerrors.Errorf("failed to cast message CID: %w", err) } - fmt.Println("RETURNING CORRECT MSG CID") - return msgCid, nil } diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 6106bfe2002..f1e2019ed7f 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -35,14 +35,12 @@ import ( badgerbs "github.com/filecoin-project/lotus/blockstore/badger" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/proofs" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/delegated" @@ -231,7 +229,7 @@ var importBenchCmd = &cli.Command{ // TODO: We need to supply the actual beacon after v14 stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), - filcns.DefaultUpgradeSchedule(), nil, metadataDs, index.DummyMsgIndex, chainindex.DummyIndexer) + filcns.DefaultUpgradeSchedule(), nil, metadataDs, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go index f1086b6fa51..ab2bba5ff9b 100644 --- a/cmd/lotus-shed/balances.go +++ b/cmd/lotus-shed/balances.go @@ -35,14 +35,12 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/gen/genesis" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/node/repo" ) @@ -516,7 +514,7 @@ var chainBalanceStateCmd = &cli.Command{ store := adt.WrapStore(ctx, cst) sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, - index.DummyMsgIndex, chainindex.DummyIndexer) + nil) if err != nil { return err } @@ -741,7 +739,7 @@ var chainPledgeCmd = &cli.Command{ store := 
adt.WrapStore(ctx, cst) sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), - vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex, chainindex.DummyIndexer) + vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/gas-estimation.go b/cmd/lotus-shed/gas-estimation.go index 745fd0e8f65..b1c61b62f2c 100644 --- a/cmd/lotus-shed/gas-estimation.go +++ b/cmd/lotus-shed/gas-estimation.go @@ -19,13 +19,11 @@ import ( "github.com/filecoin-project/lotus/chain/beacon/drand" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/node/repo" ) @@ -109,7 +107,7 @@ var gasTraceCmd = &cli.Command{ defer cs.Close() //nolint:errcheck sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), - shd, mds, index.DummyMsgIndex, chainindex.DummyIndexer) + shd, mds, nil) if err != nil { return err } @@ -206,7 +204,7 @@ var replayOfflineCmd = &cli.Command{ defer cs.Close() //nolint:errcheck sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), - shd, mds, index.DummyMsgIndex, chainindex.DummyIndexer) + shd, mds, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/invariants.go b/cmd/lotus-shed/invariants.go index 3cf937895e6..8455cb917b5 100644 --- a/cmd/lotus-shed/invariants.go +++ b/cmd/lotus-shed/invariants.go @@ -29,13 +29,11 @@ import ( "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/node/repo" ) @@ -129,7 +127,7 @@ var invariantsCmd = &cli.Command{ defer cs.Close() //nolint:errcheck sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), - vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex, chainindex.DummyIndexer) + vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/migrations.go b/cmd/lotus-shed/migrations.go index b79203cc1e2..4b378bbbb05 100644 --- a/cmd/lotus-shed/migrations.go +++ b/cmd/lotus-shed/migrations.go @@ -61,14 +61,12 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - 
"github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/must" "github.com/filecoin-project/lotus/node/repo" @@ -179,7 +177,7 @@ var migrationsCmd = &cli.Command{ // Note: we use a map datastore for the metadata to avoid writing / using cached migration results in the metadata store sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, - datastore.NewMapDatastore(), index.DummyMsgIndex, chainindex.DummyIndexer) + datastore.NewMapDatastore(), nil) if err != nil { return err } diff --git a/cmd/lotus-shed/state-stats.go b/cmd/lotus-shed/state-stats.go index c4f0d0a291d..4666ad02d3a 100644 --- a/cmd/lotus-shed/state-stats.go +++ b/cmd/lotus-shed/state-stats.go @@ -33,13 +33,11 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/node/repo" ) @@ -261,7 +259,7 @@ func loadChainStore(ctx context.Context, repoPath string) (*StoreHandle, error) tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc) sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, - index.DummyMsgIndex, chainindex.DummyIndexer) + nil) if err != nil { return nil, fmt.Errorf("failed to open state manager: %w", err) } diff --git a/cmd/lotus-sim/simulation/node.go b/cmd/lotus-sim/simulation/node.go index 06758328201..cda3e69d839 100644 --- a/cmd/lotus-sim/simulation/node.go +++ b/cmd/lotus-sim/simulation/node.go @@ -12,12 +12,10 @@ import ( "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock" "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/stages" "github.com/filecoin-project/lotus/node/repo" @@ -109,7 +107,7 @@ func (nd *Node) LoadSim(ctx context.Context, name string) (*Simulation, error) { return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err) } sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), us, - nil, nd.MetadataDS, index.DummyMsgIndex, chainindex.DummyIndexer) + nil, nd.MetadataDS, nil) if err != 
nil { return nil, xerrors.Errorf("failed to create state manager for simulation %s: %w", name, err) } @@ -129,7 +127,7 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) return nil, err } sm, err := stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), - vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil, nd.MetadataDS, index.DummyMsgIndex, chainindex.DummyIndexer) + vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil, nd.MetadataDS, nil) if err != nil { return nil, xerrors.Errorf("creating state manager: %w", err) } diff --git a/cmd/lotus-sim/simulation/simulation.go b/cmd/lotus-sim/simulation/simulation.go index 5c800b13884..9e85c7d6260 100644 --- a/cmd/lotus-sim/simulation/simulation.go +++ b/cmd/lotus-sim/simulation/simulation.go @@ -17,11 +17,9 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock" "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/stages" ) @@ -204,8 +202,7 @@ func (sim *Simulation) SetUpgradeHeight(nv network.Version, epoch abi.ChainEpoch return err } sm, err := stmgr.NewStateManager(sim.Node.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), - vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil, sim.Node.MetadataDS, index.DummyMsgIndex, - chainindex.DummyIndexer) + vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil, sim.Node.MetadataDS, nil) if err != nil { return err } diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 091a59fe1b2..5a21d2258e0 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -36,7 +36,6 @@ import ( "github.com/filecoin-project/lotus/chain/beacon/drand" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -613,7 +612,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) } stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), - vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex, chainindex.DummyIndexer) + vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, nil) if err != nil { return err } diff --git a/conformance/driver.go b/conformance/driver.go index ae3143e22dd..f57f2f11fb7 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -22,7 +22,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/lotus/chain/state" @@ -30,7 +29,6 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chainindex" 
"github.com/filecoin-project/lotus/conformance/chaos" _ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures _ "github.com/filecoin-project/lotus/lib/sigs/delegated" @@ -112,7 +110,7 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params cs = store.NewChainStore(bs, bs, ds, filcns.Weight, nil) tse = consensus.NewTipSetExecutor(filcns.RewardFunc) sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil, ds, - index.DummyMsgIndex, chainindex.DummyIndexer) + nil) ) if err != nil { return nil, err diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 4880f6de42b..bef6fc66103 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -228,13 +228,6 @@ # env var: LOTUS_FEVM_ENABLEETHRPC #EnableEthRPC = false - # EthTxHashMappingLifetimeDays the transaction hash lookup database will delete mappings that have been stored for more than x days - # Set to 0 to keep all mappings - # - # type: int - # env var: LOTUS_FEVM_ETHTXHASHMAPPINGLIFETIMEDAYS - #EthTxHashMappingLifetimeDays = 0 - # EthTraceFilterMaxResults sets the maximum results returned per request by trace_filter # # type: uint64 diff --git a/go.mod b/go.mod index f5e5b950e8c..2ef8195c6a8 100644 --- a/go.mod +++ b/go.mod @@ -113,7 +113,7 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/manifoldco/promptui v0.9.0 github.com/mattn/go-isatty v0.0.20 - github.com/mattn/go-sqlite3 v1.14.16 + github.com/mattn/go-sqlite3 v1.14.22 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.1.0 diff --git a/go.sum b/go.sum index 16a8178f19a..193643a51b6 100644 --- a/go.sum +++ b/go.sum @@ -908,8 +908,8 @@ github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= -github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= diff --git a/itests/msgindex_test.go b/itests/msgindex_test.go deleted file mode 100644 index d216b769328..00000000000 --- a/itests/msgindex_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package itests - -import ( - "context" - "os" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/exitcode" - - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/index" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/node" -) - 
-func init() { - // adjust those to make tests snappy - index.CoalesceMinDelay = time.Millisecond - index.CoalesceMaxDelay = 10 * time.Millisecond - index.CoalesceMergeInterval = time.Millisecond -} - -func testMsgIndex( - t *testing.T, - name string, - run func(t *testing.T, makeMsgIndex func(cs *store.ChainStore) (index.MsgIndex, error)), - check func(t *testing.T, i int, msgIndex index.MsgIndex), -) { - - // create the message indices in the test context - var mx sync.Mutex - var tmpDirs []string - var msgIndices []index.MsgIndex - - t.Cleanup(func() { - for _, msgIndex := range msgIndices { - _ = msgIndex.Close() - } - - for _, tmp := range tmpDirs { - _ = os.RemoveAll(tmp) - } - }) - - makeMsgIndex := func(cs *store.ChainStore) (index.MsgIndex, error) { - var err error - tmp := t.TempDir() - msgIndex, err := index.NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs, true) - if err == nil { - mx.Lock() - tmpDirs = append(tmpDirs, tmp) - msgIndices = append(msgIndices, msgIndex) - mx.Unlock() - } - return msgIndex, err - } - - t.Run(name, func(t *testing.T) { - run(t, makeMsgIndex) - }) - - if len(msgIndices) == 0 { - t.Fatal("no message indices") - } - - for i, msgIndex := range msgIndices { - check(t, i, msgIndex) - } -} - -func checkNonEmptyMsgIndex(t *testing.T, _ int, msgIndex index.MsgIndex) { - mi, ok := msgIndex.(interface{ CountMessages() (int64, error) }) - if !ok { - t.Fatal("index does not allow counting") - } - count, err := mi.CountMessages() - require.NoError(t, err) - require.NotEqual(t, count, 0) -} - -func TestMsgIndex(t *testing.T) { - testMsgIndex(t, "testSearchMsg", testSearchMsgWithIndex, checkNonEmptyMsgIndex) -} - -func testSearchMsgWithIndex(t *testing.T, makeMsgIndex func(cs *store.ChainStore) (index.MsgIndex, error)) { - // copy of apiSuite.testSearchMsgWith; needs to be copied or else CI is angry, tests are built individually there - ctx := context.Background() - - full, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts(node.Override(new(index.MsgIndex), makeMsgIndex))) - - senderAddr, err := full.WalletDefaultAddress(ctx) - require.NoError(t, err) - - msg := &types.Message{ - From: senderAddr, - To: senderAddr, - Value: big.Zero(), - } - - ens.BeginMining(100 * time.Millisecond) - - sm, err := full.MpoolPushMessage(ctx, msg, nil) - require.NoError(t, err) - - //stm: @CHAIN_STATE_WAIT_MSG_001 - res, err := full.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) - require.NoError(t, err) - - require.Equal(t, exitcode.Ok, res.Receipt.ExitCode, "message not successful") - - //stm: @CHAIN_STATE_SEARCH_MSG_001 - searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true) - require.NoError(t, err) - require.NotNil(t, searchRes) - - require.Equalf(t, res.TipSet, searchRes.TipSet, "search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet) -} diff --git a/lib/sqlite/sqlite.go b/lib/sqlite/sqlite.go index a0982aff392..0274a0c71f2 100644 --- a/lib/sqlite/sqlite.go +++ b/lib/sqlite/sqlite.go @@ -11,6 +11,7 @@ import ( "time" logging "github.com/ipfs/go-log/v2" + _ "github.com/mattn/go-sqlite3" "golang.org/x/xerrors" ) diff --git a/node/builder.go b/node/builder.go index 86034322e2d..7d03e9593a4 100644 --- a/node/builder.go +++ b/node/builder.go @@ -25,7 +25,6 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/beacon" - "github.com/filecoin-project/lotus/chain/index" 
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/journal/alerting" @@ -370,7 +369,6 @@ func Test() Option { Unset(RunPeerMgrKey), Unset(new(*peermgr.PeerMgr)), Override(new(beacon.Schedule), testing.RandomBeacon), - Override(new(index.MsgIndex), modules.DummyMsgIndex), ) } diff --git a/node/builder_chain.go b/node/builder_chain.go index 72168a0c2d1..037c56af0ad 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -21,7 +21,6 @@ import ( "github.com/filecoin-project/lotus/chain/events/filter" "github.com/filecoin-project/lotus/chain/exchange" "github.com/filecoin-project/lotus/chain/gen/slashfilter" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/lf3" "github.com/filecoin-project/lotus/chain/market" "github.com/filecoin-project/lotus/chain/messagepool" @@ -278,10 +277,6 @@ func ConfigFullNode(c interface{}) Option { ), ), - // enable message index for full node when configured by the user, otherwise use dummy. - If(cfg.Index.EnableMsgIndex, Override(new(index.MsgIndex), modules.MsgIndex)), - If(!cfg.Index.EnableMsgIndex, Override(new(index.MsgIndex), modules.DummyMsgIndex)), - // enable fault reporter when configured by the user If(cfg.FaultReporter.EnableConsensusFaultReporter, Override(ConsensusReporterKey, modules.RunConsensusFaultReporter(cfg.FaultReporter)), diff --git a/node/config/def.go b/node/config/def.go index cc390371302..527d72aa04c 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -83,10 +83,9 @@ func DefaultFullNode() *FullNode { }, }, Fevm: FevmConfig{ - EnableEthRPC: false, - EthTxHashMappingLifetimeDays: 0, - EthTraceFilterMaxResults: 500, - EthBlkCacheSize: 500, + EnableEthRPC: false, + EthTraceFilterMaxResults: 500, + EthBlkCacheSize: 500, }, Events: EventsConfig{ DisableRealTimeFilterAPI: false, diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 5b6b1497a8a..8f91eee830e 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -222,13 +222,6 @@ rewards. This address should have adequate funds to cover gas fees.`, Comment: `EnableEthRPC enables eth_ rpc, and enables storing a mapping of eth transaction hashes to filecoin message Cids. This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be disabled by config options above.`, - }, - { - Name: "EthTxHashMappingLifetimeDays", - Type: "int", - - Comment: `EthTxHashMappingLifetimeDays the transaction hash lookup database will delete mappings that have been stored for more than x days -Set to 0 to keep all mappings`, }, { Name: "EthTraceFilterMaxResults", diff --git a/node/config/types.go b/node/config/types.go index e5391113896..913c9daec85 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -542,10 +542,6 @@ type FevmConfig struct { // This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be disabled by config options above. 
EnableEthRPC bool - // EthTxHashMappingLifetimeDays the transaction hash lookup database will delete mappings that have been stored for more than x days - // Set to 0 to keep all mappings - EthTxHashMappingLifetimeDays int - // EthTraceFilterMaxResults sets the maximum results returned per request by trace_filter EthTraceFilterMaxResults uint64 diff --git a/node/impl/full/actor_events.go b/node/impl/full/actor_events.go index bb192a4cf28..aac86ff7434 100644 --- a/node/impl/full/actor_events.go +++ b/node/impl/full/actor_events.go @@ -15,6 +15,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/events/filter" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chainindex" ) type ActorEventAPI interface { @@ -283,7 +284,7 @@ func (a *ActorEventHandler) SubscribeActorEventsRaw(ctx context.Context, evtFilt nextBacklogHeightUpdate := a.clock.Now().Add(a.blockDelay) collectEvent := func(ev interface{}) bool { - ce, ok := ev.(*filter.CollectedEvent) + ce, ok := ev.(*chainindex.CollectedEvent) if !ok { log.Errorf("got unexpected value from event filter: %T", ev) return false diff --git a/node/impl/full/actor_events_test.go b/node/impl/full/actor_events_test.go index b4c4e103c0c..92fdbeaa87b 100644 --- a/node/impl/full/actor_events_test.go +++ b/node/impl/full/actor_events_test.go @@ -20,6 +20,7 @@ import ( "github.com/filecoin-project/lotus/chain/events/filter" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chainindex" ) var testCid = cid.MustParse("bafyreicmaj5hhoy5mgqvamfhgexxyergw7hdeshizghodwkjg6qmpoco7i") @@ -331,7 +332,7 @@ func TestSubscribeActorEventsRaw(t *testing.T) { req.NoError(err) mockChain.setHeaviestTipSet(ts) - var eventsThisEpoch []*filter.CollectedEvent + var eventsThisEpoch []*chainindex.CollectedEvent if thisHeight <= finishHeight { eventsThisEpoch = allEvents[(thisHeight-filterStartHeight)*eventsPerEpoch : (thisHeight-filterStartHeight+2)*eventsPerEpoch] } @@ -529,13 +530,13 @@ type mockFilter struct { id types.FilterID lastTaken time.Time ch chan<- interface{} - historicalEvents []*filter.CollectedEvent + historicalEvents []*chainindex.CollectedEvent subChannelCalls int clearSubChannelCalls int lk sync.Mutex } -func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historicalEvents []*filter.CollectedEvent) *mockFilter { +func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historicalEvents []*chainindex.CollectedEvent) *mockFilter { t.Helper() var id [32]byte _, err := rng.Read(id[:]) @@ -548,7 +549,7 @@ func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historic } } -func (m *mockFilter) sendEventToChannel(e *filter.CollectedEvent) { +func (m *mockFilter) sendEventToChannel(e *chainindex.CollectedEvent) { m.lk.Lock() defer m.lk.Unlock() if m.ch != nil { @@ -602,7 +603,7 @@ func (m *mockFilter) ClearSubChannel() { m.ch = nil } -func (m *mockFilter) TakeCollectedEvents(context.Context) []*filter.CollectedEvent { +func (m *mockFilter) TakeCollectedEvents(context.Context) []*chainindex.CollectedEvent { e := m.historicalEvents m.historicalEvents = nil m.lastTaken = time.Now() @@ -729,7 +730,7 @@ func epochPtr(i int) *abi.ChainEpoch { return &e } -func collectedToActorEvents(collected []*filter.CollectedEvent) []*types.ActorEvent { +func collectedToActorEvents(collected []*chainindex.CollectedEvent) []*types.ActorEvent { var out []*types.ActorEvent for _, c := range collected { out = append(out, 
&types.ActorEvent{ @@ -744,8 +745,8 @@ func collectedToActorEvents(collected []*filter.CollectedEvent) []*types.ActorEv return out } -func makeCollectedEvents(t *testing.T, rng *pseudo.Rand, eventStartHeight, eventsPerHeight, eventEndHeight int64) []*filter.CollectedEvent { - var out []*filter.CollectedEvent +func makeCollectedEvents(t *testing.T, rng *pseudo.Rand, eventStartHeight, eventsPerHeight, eventEndHeight int64) []*chainindex.CollectedEvent { + var out []*chainindex.CollectedEvent for h := eventStartHeight; h <= eventEndHeight; h++ { for i := int64(0); i < eventsPerHeight; i++ { out = append(out, makeCollectedEvent(t, rng, types.NewTipSetKey(mkCid(t, fmt.Sprintf("h=%d", h))), abi.ChainEpoch(h))) @@ -754,11 +755,11 @@ func makeCollectedEvents(t *testing.T, rng *pseudo.Rand, eventStartHeight, event return out } -func makeCollectedEvent(t *testing.T, rng *pseudo.Rand, tsKey types.TipSetKey, height abi.ChainEpoch) *filter.CollectedEvent { +func makeCollectedEvent(t *testing.T, rng *pseudo.Rand, tsKey types.TipSetKey, height abi.ChainEpoch) *chainindex.CollectedEvent { addr, err := address.NewIDAddress(uint64(rng.Int63())) require.NoError(t, err) - return &filter.CollectedEvent{ + return &chainindex.CollectedEvent{ Entries: []types.EventEntry{ {Flags: 0x01, Key: "k1", Codec: cid.Raw, Value: []byte("v1")}, {Flags: 0x01, Key: "k2", Codec: cid.Raw, Value: []byte("v2")}, diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index f36b4e5101d..2c8ae8d01dd 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -32,7 +32,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors" builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" builtinevm "github.com/filecoin-project/lotus/chain/actors/builtin/evm" - "github.com/filecoin-project/lotus/chain/ethhashlookup" "github.com/filecoin-project/lotus/chain/events/filter" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" @@ -47,11 +46,6 @@ var ErrUnsupported = errors.New("unsupported method") const maxEthFeeHistoryRewardPercentiles = 100 -var ( - // wait for 3 epochs - eventReadTimeout = 90 * time.Second -) - type EthModuleAPI interface { EthBlockNumber(ctx context.Context) (ethtypes.EthUint64, error) EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error) @@ -137,7 +131,6 @@ type EthModule struct { Chain *store.ChainStore Mpool *messagepool.MessagePool StateManager *stmgr.StateManager - EthTxHashManager *EthTxHashManager EthTraceFilterMaxResults uint64 EthEventHandler *EthEventHandler @@ -361,12 +354,10 @@ func (a *EthModule) EthGetTransactionByHashLimited(ctx context.Context, txHash * var c cid.Cid var err error - c, err = a.ChainIndexer.GetCidFromHash(ctx, *txHash) - if err != nil { - log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) - c, err = a.EthTxHashManager.TransactionHashLookup.GetCidFromHash(*txHash) + if a.ChainIndexer != nil { + c, err = a.ChainIndexer.GetCidFromHash(ctx, *txHash) if err != nil { - log.Debug("could not find transaction hash %s in lookup table", txHash.String()) + log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) } } @@ -426,18 +417,19 @@ func (a *EthModule) EthGetMessageCidByTransactionHash(ctx context.Context, txHas var c cid.Cid var err error - c, err = a.ChainIndexer.GetCidFromHash(ctx, *txHash) - if err != nil { - log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) - c, err = 
a.EthTxHashManager.TransactionHashLookup.GetCidFromHash(*txHash) + if a.ChainIndexer != nil { + c, err = a.ChainIndexer.GetCidFromHash(ctx, *txHash) + if err != nil { + log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) + } } // We fall out of the first condition and continue - if errors.Is(err, ethhashlookup.ErrNotFound) { + if errors.Is(err, chainindex.ErrNotFound) { log.Debug("could not find transaction hash %s in lookup table", txHash.String()) } else if err != nil { return nil, xerrors.Errorf("database error: %w", err) - } else { + } else if a.ChainIndexer != nil { return &c, nil } @@ -517,12 +509,11 @@ func (a *EthModule) EthGetTransactionReceipt(ctx context.Context, txHash ethtype func (a *EthModule) EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*api.EthTxReceipt, error) { var c cid.Cid var err error - c, err = a.ChainIndexer.GetCidFromHash(ctx, txHash) - if err != nil { - log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) - c, err = a.EthTxHashManager.TransactionHashLookup.GetCidFromHash(txHash) + + if a.ChainIndexer != nil { + c, err = a.ChainIndexer.GetCidFromHash(ctx, txHash) if err != nil { - log.Debug("could not find transaction hash %s in lookup table", txHash.String()) + log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) } } @@ -962,12 +953,12 @@ func (a *EthModule) EthSendRawTransaction(ctx context.Context, rawTx ethtypes.Et // make it immediately available in the transaction hash lookup db, even though it will also // eventually get there via the mpool - if err := a.ChainIndexer.IndexEthTxHash(ctx, txHash, smsg.Cid()); err != nil { - log.Errorf("error indexing tx: %s", err) + if a.ChainIndexer != nil { + if err := a.ChainIndexer.IndexEthTxHash(ctx, txHash, smsg.Cid()); err != nil { + log.Errorf("error indexing tx: %s", err) + } } - fmt.Println("INDEXING CID", smsg.Cid()) - return ethtypes.EthHashFromTxBytes(rawTx), nil } @@ -1582,12 +1573,13 @@ func (e *EthEventHandler) EthGetLogs(ctx context.Context, filterSpec *ethtypes.E return ethFilterResultFromEvents(ctx, ces, e.SubManager.StateAPI) } -func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) ([]*filter.CollectedEvent, error) { +func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, + filterSpec *ethtypes.EthFilterSpec) ([]*chainindex.CollectedEvent, error) { if e.EventFilterManager == nil { return nil, api.ErrNotSupported } - if e.EventFilterManager.EventIndex == nil { + if e.EventFilterManager.ChainIndexer == nil { return nil, xerrors.Errorf("cannot use eth_get_logs if historical event index is disabled") } @@ -1596,106 +1588,37 @@ func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, filterSpec return nil, xerrors.Errorf("failed to parse eth filter spec: %w", err) } - if pf.tipsetCid == cid.Undef { - maxHeight := pf.maxHeight - if maxHeight == -1 { - // heaviest tipset doesn't have events because its messages haven't been executed yet - maxHeight = e.Chain.GetHeaviestTipSet().Height() - 1 - } - - if maxHeight < 0 { - return nil, xerrors.Errorf("maxHeight requested is less than 0") - } - - // we can't return events for the heaviest tipset as the transactions in that tipset will be executed - // in the next non null tipset (because of Filecoin's "deferred execution" model) - if maxHeight > e.Chain.GetHeaviestTipSet().Height()-1 { - return nil, xerrors.Errorf("maxHeight requested is greater than 
the heaviest tipset") - } - - err := e.waitForHeightProcessed(ctx, maxHeight) - if err != nil { - return nil, err - } - // TODO: Ideally we should also check that events for the epoch at `pf.minheight` have been indexed - // However, it is currently tricky to check/guarantee this for two reasons: - // a) Event Index is not aware of null-blocks. This means that the Event Index wont be able to say whether the block at - // `pf.minheight` is a null block or whether it has no events - // b) There can be holes in the index where events at certain epoch simply haven't been indexed because of edge cases around - // node restarts while indexing. This needs a long term "auto-repair"/"automated-backfilling" implementation in the index - // So, for now, the best we can do is ensure that the event index has evenets for events at height >= `pf.maxHeight` - } else { + head := e.Chain.GetHeaviestTipSet() + // should not ask for events for a tipset >= head because of deferred execution + if pf.tipsetCid != cid.Undef { ts, err := e.Chain.GetTipSetByCid(ctx, pf.tipsetCid) if err != nil { return nil, xerrors.Errorf("failed to get tipset by cid: %w", err) } - err = e.waitForHeightProcessed(ctx, ts.Height()) - if err != nil { - return nil, err - } - - b, err := e.EventFilterManager.EventIndex.IsTipsetProcessed(ctx, pf.tipsetCid.Bytes()) - if err != nil { - return nil, xerrors.Errorf("failed to check if tipset events have been indexed: %w", err) - } - if !b { - return nil, xerrors.Errorf("event index failed to index tipset %s", pf.tipsetCid.String()) + if ts.Height() >= head.Height() { + return nil, xerrors.Errorf("cannot ask for events for a tipset >= head") } } - // Create a temporary filter - f, err := e.EventFilterManager.Install(ctx, pf.minHeight, pf.maxHeight, pf.tipsetCid, pf.addresses, pf.keys, true) - if err != nil { - return nil, xerrors.Errorf("failed to install event filter: %w", err) + if pf.maxHeight >= head.Height() { + return nil, xerrors.Errorf("cannot ask for events for a tipset >= head") } - ces := f.TakeCollectedEvents(ctx) - - _ = e.uninstallFilter(ctx, f) - - return ces, nil -} -// note that we can have null blocks at the given height and the event Index is not null block aware -// so, what we do here is wait till we see the event index contain a block at a height greater than the given height -func (e *EthEventHandler) waitForHeightProcessed(ctx context.Context, height abi.ChainEpoch) error { - ei := e.EventFilterManager.EventIndex - if height > e.Chain.GetHeaviestTipSet().Height() { - return xerrors.New("height is in the future") + ef := &chainindex.EventFilter{ + MinHeight: pf.minHeight, + MaxHeight: pf.maxHeight, + TipsetCid: pf.tipsetCid, + Addresses: pf.addresses, + KeysWithCodec: pf.keys, + MaxResults: e.EventFilterManager.MaxFilterResults, } - ctx, cancel := context.WithTimeout(ctx, eventReadTimeout) - defer cancel() - - // if the height we're interested in has already been indexed -> there's nothing to do here - if b, err := ei.IsHeightPast(ctx, uint64(height)); err != nil { - return xerrors.Errorf("failed to check if event index has events for given height: %w", err) - } else if b { - return nil - } - - // subscribe for updates to the event index - subCh, unSubscribeF := ei.SubscribeUpdates() - defer unSubscribeF() - - // it could be that the event index was update while the subscription was being processed -> check if index has what we need now - if b, err := ei.IsHeightPast(ctx, uint64(height)); err != nil { - return xerrors.Errorf("failed to check if event index has events for 
given height: %w", err) - } else if b { - return nil + ces, err := e.EventFilterManager.ChainIndexer.GetEventsForFilter(ctx, ef, true) + if err != nil { + return nil, xerrors.Errorf("failed to get events for filter: %w", err) } - for { - select { - case <-subCh: - if b, err := ei.IsHeightPast(ctx, uint64(height)); err != nil { - return xerrors.Errorf("failed to check if event index has events for given height: %w", err) - } else if b { - return nil - } - case <-ctx.Done(): - return ctx.Err() - } - } + return ces, nil } func (e *EthEventHandler) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) { diff --git a/node/impl/full/eth_events.go b/node/impl/full/eth_events.go index 0c474b92fe2..171b5f9e164 100644 --- a/node/impl/full/eth_events.go +++ b/node/impl/full/eth_events.go @@ -16,10 +16,11 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" + "github.com/filecoin-project/lotus/chainindex" ) type filterEventCollector interface { - TakeCollectedEvents(context.Context) []*filter.CollectedEvent + TakeCollectedEvents(context.Context) []*chainindex.CollectedEvent } type filterMessageCollector interface { @@ -93,7 +94,7 @@ func ethLogFromEvent(entries []types.EventEntry) (data []byte, topics []ethtypes return data, topics, true } -func ethFilterLogsFromEvents(ctx context.Context, evs []*filter.CollectedEvent, sa StateAPI) ([]ethtypes.EthLog, error) { +func ethFilterLogsFromEvents(ctx context.Context, evs []*chainindex.CollectedEvent, sa StateAPI) ([]ethtypes.EthLog, error) { var logs []ethtypes.EthLog for _, ev := range evs { log := ethtypes.EthLog{ @@ -140,7 +141,7 @@ func ethFilterLogsFromEvents(ctx context.Context, evs []*filter.CollectedEvent, return logs, nil } -func ethFilterResultFromEvents(ctx context.Context, evs []*filter.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) { +func ethFilterResultFromEvents(ctx context.Context, evs []*chainindex.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) { logs, err := ethFilterLogsFromEvents(ctx, evs, sa) if err != nil { return nil, err @@ -347,8 +348,8 @@ func (e *ethSubscription) start(ctx context.Context) { return case v := <-e.in: switch vt := v.(type) { - case *filter.CollectedEvent: - evs, err := ethFilterResultFromEvents(ctx, []*filter.CollectedEvent{vt}, e.StateAPI) + case *chainindex.CollectedEvent: + evs, err := ethFilterResultFromEvents(ctx, []*chainindex.CollectedEvent{vt}, e.StateAPI) if err != nil { continue } diff --git a/node/impl/full/txhashmanager.go b/node/impl/full/txhashmanager.go deleted file mode 100644 index df31670b60a..00000000000 --- a/node/impl/full/txhashmanager.go +++ /dev/null @@ -1,136 +0,0 @@ -package full - -import ( - "context" - "time" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build/buildconstants" - "github.com/filecoin-project/lotus/chain/ethhashlookup" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/types/ethtypes" -) - -type EthTxHashManager struct { - StateAPI StateAPI - TransactionHashLookup *ethhashlookup.EthTxHashLookup -} - -func (m *EthTxHashManager) Revert(ctx context.Context, from, to *types.TipSet) error { - return nil -} - -func (m *EthTxHashManager) PopulateExistingMappings(ctx context.Context, minHeight abi.ChainEpoch) 
error { - if minHeight < buildconstants.UpgradeHyggeHeight { - minHeight = buildconstants.UpgradeHyggeHeight - } - - ts := m.StateAPI.Chain.GetHeaviestTipSet() - for ts.Height() > minHeight { - for _, block := range ts.Blocks() { - msgs, err := m.StateAPI.Chain.SecpkMessagesForBlock(ctx, block) - if err != nil { - // If we can't find the messages, we've either imported from snapshot or pruned the store - log.Debug("exiting message mapping population at epoch ", ts.Height()) - return nil - } - - for _, msg := range msgs { - m.ProcessSignedMessage(ctx, msg) - } - } - - var err error - ts, err = m.StateAPI.Chain.GetTipSetFromKey(ctx, ts.Parents()) - if err != nil { - return err - } - } - - return nil -} - -func (m *EthTxHashManager) Apply(ctx context.Context, from, to *types.TipSet) error { - for _, blk := range to.Blocks() { - _, smsgs, err := m.StateAPI.Chain.MessagesForBlock(ctx, blk) - if err != nil { - return err - } - - for _, smsg := range smsgs { - if smsg.Signature.Type != crypto.SigTypeDelegated { - continue - } - - hash, err := ethTxHashFromSignedMessage(smsg) - if err != nil { - return err - } - - err = m.TransactionHashLookup.UpsertHash(hash, smsg.Cid()) - if err != nil { - return err - } - } - } - - return nil -} - -func (m *EthTxHashManager) ProcessSignedMessage(ctx context.Context, msg *types.SignedMessage) { - if msg.Signature.Type != crypto.SigTypeDelegated { - return - } - - ethTx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(msg) - if err != nil { - log.Errorf("error converting filecoin message to eth tx: %s", err) - return - } - - txHash, err := ethTx.TxHash() - if err != nil { - log.Errorf("error hashing transaction: %s", err) - return - } - - err = m.TransactionHashLookup.UpsertHash(txHash, msg.Cid()) - if err != nil { - log.Errorf("error inserting tx mapping to db: %s", err) - return - } -} - -func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, manager *EthTxHashManager) { - for { - select { - case <-ctx.Done(): - return - case u := <-ch: - if u.Type != api.MpoolAdd { - continue - } - - manager.ProcessSignedMessage(ctx, u.Message) - } - } -} - -func EthTxHashGC(ctx context.Context, retentionDays int, manager *EthTxHashManager) { - if retentionDays == 0 { - return - } - - gcPeriod := 1 * time.Hour - for { - entriesDeleted, err := manager.TransactionHashLookup.DeleteEntriesOlderThan(retentionDays) - if err != nil { - log.Errorf("error garbage collecting eth transaction hash database: %s", err) - } - log.Info("garbage collection run on eth transaction hash lookup database. 
%d entries deleted", entriesDeleted) - time.Sleep(gcPeriod) - } -} diff --git a/node/modules/actorevent.go b/node/modules/actorevent.go index 3b02be1c4e0..23c5607b04f 100644 --- a/node/modules/actorevent.go +++ b/node/modules/actorevent.go @@ -2,11 +2,9 @@ package modules import ( "context" - "path/filepath" "time" "go.uber.org/fx" - "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" @@ -18,6 +16,7 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/modules/helpers" @@ -95,40 +94,17 @@ func EthEventHandler(cfg config.EventsConfig, enableEthRPC bool) func(helpers.Me } } -func EventFilterManager(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, full.ChainAPI) (*filter.EventFilterManager, error) { - return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, chainapi full.ChainAPI) (*filter.EventFilterManager, error) { +func EventFilterManager(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, + *stmgr.StateManager, EventHelperAPI, full.ChainAPI, chainindex.Indexer) (*filter.EventFilterManager, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, + evapi EventHelperAPI, chainapi full.ChainAPI, ci chainindex.Indexer) (*filter.EventFilterManager, error) { ctx := helpers.LifecycleCtx(mctx, lc) // Enable indexing of actor events - var eventIndex *filter.EventIndex - if !cfg.DisableHistoricFilterAPI { - var dbPath string - if cfg.DatabasePath == "" { - sqlitePath, err := r.SqlitePath() - if err != nil { - return nil, xerrors.Errorf("failed to resolve event index database path: %w", err) - } - dbPath = filepath.Join(sqlitePath, filter.DefaultDbFilename) - } else { - dbPath = cfg.DatabasePath - } - - var err error - eventIndex, err = filter.NewEventIndex(ctx, dbPath, chainapi.Chain) - if err != nil { - return nil, xerrors.Errorf("failed to initialize event index database: %w", err) - } - - lc.Append(fx.Hook{ - OnStop: func(context.Context) error { - return eventIndex.Close() - }, - }) - } fm := &filter.EventFilterManager{ - ChainStore: cs, - EventIndex: eventIndex, // will be nil unless EnableHistoricFilterAPI is true + ChainStore: cs, + ChainIndexer: ci, // TODO: // We don't need this address resolution anymore once https://github.com/filecoin-project/lotus/issues/11594 lands AddressResolver: func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { diff --git a/node/modules/chain.go b/node/modules/chain.go index 54c07fdd5b4..cf088283ea5 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -21,12 +21,10 @@ import ( "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/exchange" "github.com/filecoin-project/lotus/chain/gen/slashfilter" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" 
"github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" @@ -125,7 +123,7 @@ func NetworkName(mctx helpers.MetricsCtx, ctx := helpers.LifecycleCtx(mctx, lc) - sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us, nil, nil, index.DummyMsgIndex, chainindex.DummyIndexer) + sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us, nil, nil, nil) if err != nil { return "", err } diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index d01b9ed3be0..29d8baef17c 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -7,9 +7,14 @@ import ( "go.uber.org/fx" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/helpers" @@ -39,11 +44,27 @@ func ChainIndexer(cfg config.IndexConfig) func(lc fx.Lifecycle, mctx helpers.Met } } -func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer chainindex.Indexer, evapi EventHelperAPI, mp *messagepool.MessagePool) { +func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer chainindex.Indexer, + evapi EventHelperAPI, mp *messagepool.MessagePool, sm *stmgr.StateManager) { ctx := helpers.LifecycleCtx(mctx, lc) lc.Append(fx.Hook{ OnStart: func(_ context.Context) error { + + indexer.SetIdToRobustAddrFunc(func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { + idAddr, err := address.NewIDAddress(uint64(emitter)) + if err != nil { + return address.Undef, false + } + + actor, err := sm.LoadActor(ctx, idAddr, ts) + if err != nil || actor.DelegatedAddress == nil { + return idAddr, true + } + + return *actor.DelegatedAddress, true + }) + ev, err := events.NewEvents(ctx, &evapi) if err != nil { return err diff --git a/node/modules/ethmodule.go b/node/modules/ethmodule.go index cc092fb06c8..89bf857ed76 100644 --- a/node/modules/ethmodule.go +++ b/node/modules/ethmodule.go @@ -1,8 +1,6 @@ package modules import ( - "context" - "path/filepath" "time" "github.com/hashicorp/golang-lru/arc/v2" @@ -12,7 +10,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/ethhashlookup" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -29,30 +26,6 @@ func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRep return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI, mpoolapi full.MpoolAPI, syncapi full.SyncAPI, ethEventHandler *full.EthEventHandler, chainIndexer chainindex.Indexer) (*full.EthModule, error) { - ctx := helpers.LifecycleCtx(mctx, lc) - - sqlitePath, err := r.SqlitePath() - if err != nil { - return nil, err - } - - dbPath := filepath.Join(sqlitePath, ethhashlookup.DefaultDbFilename) - - 
transactionHashLookup, err := ethhashlookup.NewTransactionHashLookup(ctx, dbPath) - if err != nil { - return nil, err - } - - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return transactionHashLookup.Close() - }, - }) - - ethTxHashManager := full.EthTxHashManager{ - StateAPI: stateapi, - TransactionHashLookup: transactionHashLookup, - } // prefill the whole skiplist cache maintained internally by the GetTipsetByHeight go func() { @@ -65,15 +38,7 @@ func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRep log.Infof("Prefilling GetTipsetByHeight done in %s", time.Since(start)) }() - lc.Append(fx.Hook{ - OnStart: func(context.Context) error { - - go full.EthTxHashGC(ctx, cfg.EthTxHashMappingLifetimeDays, ðTxHashManager) - - return nil - }, - }) - + var err error var blkCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] var blkTxCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] if cfg.EthBlkCacheSize > 0 { @@ -99,7 +64,6 @@ func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRep SyncAPI: syncapi, EthEventHandler: ethEventHandler, - EthTxHashManager: ðTxHashManager, EthTraceFilterMaxResults: cfg.EthTraceFilterMaxResults, EthBlkCache: blkCache, diff --git a/node/modules/msgindex.go b/node/modules/msgindex.go deleted file mode 100644 index bdf25d3789f..00000000000 --- a/node/modules/msgindex.go +++ /dev/null @@ -1,37 +0,0 @@ -package modules - -import ( - "context" - "path/filepath" - - "go.uber.org/fx" - - "github.com/filecoin-project/lotus/chain/index" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/node/modules/helpers" - "github.com/filecoin-project/lotus/node/repo" -) - -func MsgIndex(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (index.MsgIndex, error) { - basePath, err := r.SqlitePath() - if err != nil { - return nil, err - } - - msgIndex, err := index.NewMsgIndex(helpers.LifecycleCtx(mctx, lc), filepath.Join(basePath, index.DefaultDbFilename), cs, false) - if err != nil { - return nil, err - } - - lc.Append(fx.Hook{ - OnStop: func(_ context.Context) error { - return msgIndex.Close() - }, - }) - - return msgIndex, nil -} - -func DummyMsgIndex() index.MsgIndex { - return index.DummyMsgIndex -} diff --git a/node/modules/stmgr.go b/node/modules/stmgr.go index 4bc83dd9255..0518380ea71 100644 --- a/node/modules/stmgr.go +++ b/node/modules/stmgr.go @@ -4,7 +4,6 @@ import ( "go.uber.org/fx" "github.com/filecoin-project/lotus/chain/beacon" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/vm" @@ -12,8 +11,8 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" ) -func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule, metadataDs dtypes.MetadataDS, msgIndex index.MsgIndex, chainIndexer chainindex.Indexer) (*stmgr.StateManager, error) { - sm, err := stmgr.NewStateManager(cs, exec, sys, us, b, metadataDs, msgIndex, chainIndexer) +func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule, metadataDs dtypes.MetadataDS, chainIndexer chainindex.Indexer) (*stmgr.StateManager, error) { + sm, err := stmgr.NewStateManager(cs, exec, sys, us, b, metadataDs, chainIndexer) if err != nil { return nil, err } From e6331da7b54966b33ffeb5cfab35f71fd62fab45 
Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Wed, 4 Sep 2024 19:58:13 +0400 Subject: [PATCH 10/66] fix test --- itests/direct_data_onboard_verified_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/itests/direct_data_onboard_verified_test.go b/itests/direct_data_onboard_verified_test.go index 8d3d9fef78c..854a3b75c40 100644 --- a/itests/direct_data_onboard_verified_test.go +++ b/itests/direct_data_onboard_verified_test.go @@ -135,7 +135,7 @@ func TestOnboardRawPieceVerified_WithActorEvents(t *testing.T) { // subscribe to actor events up until the current head initialEventsChan, err := miner.FullNode.SubscribeActorEventsRaw(ctx, &types.ActorEventFilter{ FromHeight: epochPtr(0), - ToHeight: epochPtr(int64(head.Height())), + ToHeight: epochPtr(int64(head.Height()) - 1), }) require.NoError(t, err) From f1f24c80e94294cccf1823b847359ebfc71d7604 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 5 Sep 2024 11:19:10 +0400 Subject: [PATCH 11/66] revert deployment test changes --- itests/eth_deploy_test.go | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/itests/eth_deploy_test.go b/itests/eth_deploy_test.go index 07df970e376..8fb9b1515ed 100644 --- a/itests/eth_deploy_test.go +++ b/itests/eth_deploy_test.go @@ -4,7 +4,6 @@ import ( "context" "encoding/hex" "encoding/json" - "fmt" "os" "reflect" "strconv" @@ -36,7 +35,7 @@ func TestDeployment(t *testing.T) { kit.MockProofs(), kit.ThroughRPC()) - _ = ens.InterconnectAll().BeginMining(blockTime) + miners := ens.InterconnectAll().BeginMining(blockTime) ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() @@ -92,22 +91,25 @@ func TestDeployment(t *testing.T) { pendingFilter, err := client.EthNewPendingTransactionFilter(ctx) require.NoError(t, err) + // Pause so we can test that everything works while the message is in the message pool. + for _, miner := range miners { + miner.Pause() + } + hash := client.EVM().SubmitTransaction(ctx, &tx) - var mpoolTx *ethtypes.EthTx - for i := 0; i < 3; i++ { - mpoolTx, err = client.EthGetTransactionByHash(ctx, &hash) - require.NoError(t, err) - if mpoolTx != nil { - break - } - time.Sleep(500 * time.Millisecond) - } + mpoolTx, err := client.EthGetTransactionByHash(ctx, &hash) + require.NoError(t, err) require.NotNil(t, mpoolTx) // require that the hashes are identical require.Equal(t, hash, mpoolTx.Hash) + // these fields should be nil because the tx hasn't landed on chain. + require.Nil(t, mpoolTx.BlockNumber) + require.Nil(t, mpoolTx.BlockHash) + require.Nil(t, mpoolTx.TransactionIndex) + // We should be able to get the message CID immediately. mCid, err := client.EthGetMessageCidByTransactionHash(ctx, &hash) require.NoError(t, err) @@ -124,11 +126,14 @@ func TestDeployment(t *testing.T) { require.Len(t, changes.Results, 1) require.Equal(t, hash.String(), changes.Results[0]) + // Unpause mining. + for _, miner := range miners { + miner.Restart() + } + // Wait for the message to land. - fmt.Println("waiting for message to land") _, err = client.StateWaitMsg(ctx, *mCid, 3, api.LookbackNoLimit, false) require.NoError(t, err) - fmt.Println("message landed") // Then lookup the receipt. 
receipt, err := client.EthGetTransactionReceipt(ctx, hash) From 3f09e1e892df19ed2f7cb62f71fcc548dcd329ea Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 5 Sep 2024 11:50:40 +0400 Subject: [PATCH 12/66] revert test changes and better error handling for eth tx index lookups --- itests/eth_hash_lookup_test.go | 11 ++--------- node/impl/full/eth.go | 13 ++++++++++--- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/itests/eth_hash_lookup_test.go b/itests/eth_hash_lookup_test.go index 087673281fe..9324aafbb63 100644 --- a/itests/eth_hash_lookup_test.go +++ b/itests/eth_hash_lookup_test.go @@ -83,15 +83,8 @@ func TestTransactionHashLookup(t *testing.T) { hash := client.EVM().SubmitTransaction(ctx, &tx) require.Equal(t, rawTxHash, hash) - var mpoolTx *ethtypes.EthTx - for i := 0; i < 3; i++ { - mpoolTx, err = client.EthGetTransactionByHash(ctx, &hash) - require.NoError(t, err) - if mpoolTx != nil { - break - } - time.Sleep(100 * time.Millisecond) - } + mpoolTx, err := client.EthGetTransactionByHash(ctx, &hash) + require.NoError(t, err) require.Equal(t, hash, mpoolTx.Hash) // Wait for message to land on chain diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index 2c8ae8d01dd..0591ed2ad98 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -356,8 +356,11 @@ func (a *EthModule) EthGetTransactionByHashLimited(ctx context.Context, txHash * var err error if a.ChainIndexer != nil { c, err = a.ChainIndexer.GetCidFromHash(ctx, *txHash) - if err != nil { + + if err != nil && errors.Is(err, chainindex.ErrNotFound) { log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) + } else if err != nil { + return nil, xerrors.Errorf("database error: %w", err) } } @@ -419,8 +422,10 @@ func (a *EthModule) EthGetMessageCidByTransactionHash(ctx context.Context, txHas var err error if a.ChainIndexer != nil { c, err = a.ChainIndexer.GetCidFromHash(ctx, *txHash) - if err != nil { + if err != nil && errors.Is(err, chainindex.ErrNotFound) { log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) + } else if err != nil { + return nil, xerrors.Errorf("database error: %w", err) } } @@ -512,8 +517,10 @@ func (a *EthModule) EthGetTransactionReceiptLimited(ctx context.Context, txHash if a.ChainIndexer != nil { c, err = a.ChainIndexer.GetCidFromHash(ctx, txHash) - if err != nil { + if err != nil && errors.Is(err, chainindex.ErrNotFound) { log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) + } else if err != nil { + return nil, xerrors.Errorf("database error: %w", err) } } From c3865b725f5af8761e420e19290e29a5a535ad9b Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 5 Sep 2024 12:23:19 +0400 Subject: [PATCH 13/66] fix sql statments naming convention --- chainindex/ddls.go | 18 +++++------ chainindex/events.go | 2 +- chainindex/gc.go | 2 +- chainindex/indexer.go | 75 +++++++++++++++++++++---------------------- chainindex/read.go | 2 +- 5 files changed, 48 insertions(+), 51 deletions(-) diff --git a/chainindex/ddls.go b/chainindex/ddls.go index f75c003015d..02fdb23a0f1 100644 --- a/chainindex/ddls.go +++ b/chainindex/ddls.go @@ -9,31 +9,31 @@ const ( stmtInsertTipsetMessage = "INSERT INTO tipset_message (tipset_key_cid, height, reverted, message_cid, message_index) VALUES (?, ?, ?, ?, ?) 
ON CONFLICT (tipset_key_cid, message_cid) DO UPDATE SET reverted = 0" - stmtTipsetExists = "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ?)" - stmtTipsetUnRevert = "UPDATE tipset_message SET reverted = 0 WHERE tipset_key_cid = ?" + stmtHasTipset = "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ?)" + stmtUpdateTipsetToNonReverted = "UPDATE tipset_message SET reverted = 0 WHERE tipset_key_cid = ?" - stmtRevertTipset = "UPDATE tipset_message SET reverted = 1 WHERE tipset_key_cid = ?" + stmtUpdateTipsetToReverted = "UPDATE tipset_message SET reverted = 1 WHERE tipset_key_cid = ?" stmtGetMaxNonRevertedTipset = "SELECT tipset_key_cid FROM tipset_message WHERE reverted = 0 ORDER BY height DESC LIMIT 1" stmtRemoveRevertedTipsetsBeforeHeight = "DELETE FROM tipset_message WHERE height < ? AND reverted = 1" stmtRemoveTipsetsBeforeHeight = "DELETE FROM tipset_message WHERE height < ?" - stmtDeleteEthHashesOlderThan = `DELETE FROM eth_tx_hash WHERE inserted_at < datetime('now', ?);` + stmtRemoveEthHashesOlderThan = `DELETE FROM eth_tx_hash WHERE inserted_at < datetime('now', ?);` - stmtRevertTipsetsFromHeight = "UPDATE tipset_message SET reverted = 1 WHERE height >= ?" + stmtUpdateTipsetsToRevertedFromHeight = "UPDATE tipset_message SET reverted = 1 WHERE height >= ?" stmtCountMessages = "SELECT COUNT(*) FROM tipset_message" - stmtMinNonRevertedHeight = `SELECT MIN(height) FROM tipset_message WHERE reverted = 0` + stmtGetMinNonRevertedHeight = `SELECT MIN(height) FROM tipset_message WHERE reverted = 0` - stmtTipsetExistsNotReverted = `SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ? AND reverted = 0)` + stmtHasNonRevertedTipset = `SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ? AND reverted = 0)` - stmtEventsRevert = `UPDATE event SET reverted = 1 WHERE message_id IN ( + stmtUpdateEventsToReverted = `UPDATE event SET reverted = 1 WHERE message_id IN ( SELECT message_id FROM tipset_message WHERE tipset_key_cid = ? )` - stmtEventsUnRevert = `UPDATE event SET reverted = 0 WHERE message_id IN ( + stmtUpdateEventsToNonReverted = `UPDATE event SET reverted = 0 WHERE message_id IN ( SELECT message_id FROM tipset_message WHERE tipset_key_cid = ? 
)` diff --git a/chainindex/events.go b/chainindex/events.go index 629f7844f63..c8880398858 100644 --- a/chainindex/events.go +++ b/chainindex/events.go @@ -38,7 +38,7 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ } // if we've already indexed events for this tipset, mark them as unreverted and return - res, err := tx.Stmt(si.eventsUnRevertStmt).ExecContext(ctx, msgTsKeyCidBytes) + res, err := tx.Stmt(si.updateEventsToNonRevertedStmt).ExecContext(ctx, msgTsKeyCidBytes) if err != nil { return xerrors.Errorf("error unreverting events for tipset: %w", err) } diff --git a/chainindex/gc.go b/chainindex/gc.go index 4aea573c6fb..d4c852f6562 100644 --- a/chainindex/gc.go +++ b/chainindex/gc.go @@ -72,7 +72,7 @@ func (si *SqliteIndexer) gc(ctx context.Context) { return } - res, err = si.deleteEthHashesOlderThanStmt.Exec("-" + strconv.Itoa(days) + " day") + res, err = si.removeEthHashesOlderThanStmt.Exec("-" + strconv.Itoa(days) + " day") if err != nil { log.Errorw("failed to delete eth hashes older than", "error", err) return diff --git a/chainindex/indexer.go b/chainindex/indexer.go index b4ca0cdd51d..a2177639619 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -36,19 +36,19 @@ type SqliteIndexer struct { getNonRevertedMsgInfoStmt *sql.Stmt getMsgCidFromEthHashStmt *sql.Stmt insertTipsetMessageStmt *sql.Stmt - revertTipsetStmt *sql.Stmt + updateTipsetToRevertedStmt *sql.Stmt getMaxNonRevertedTipsetStmt *sql.Stmt - tipsetExistsStmt *sql.Stmt - tipsetUnRevertStmt *sql.Stmt + hasTipsetStmt *sql.Stmt + updateTipsetToNonRevertedStmt *sql.Stmt removeRevertedTipsetsBeforeHeightStmt *sql.Stmt removeTipsetsBeforeHeightStmt *sql.Stmt - deleteEthHashesOlderThanStmt *sql.Stmt - revertTipsetsFromHeightStmt *sql.Stmt + removeEthHashesOlderThanStmt *sql.Stmt + updateTipsetsToRevertedFromHeightStmt *sql.Stmt countMessagesStmt *sql.Stmt - minNonRevertedHeightStmt *sql.Stmt - tipsetExistsNotRevertedStmt *sql.Stmt - revertEventsStmt *sql.Stmt - eventsUnRevertStmt *sql.Stmt + getMinNonRevertedHeightStmt *sql.Stmt + hasNonRevertedTipsetStmt *sql.Stmt + updateEventsToRevertedStmt *sql.Stmt + updateEventsToNonRevertedStmt *sql.Stmt getMsgIdForMsgCidAndTipsetStmt *sql.Stmt insertEventStmt *sql.Stmt insertEventEntryStmt *sql.Stmt @@ -141,7 +141,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types } // Find the minimum applied tipset in the index; this will mark the end of the reconciliation walk - row = tx.StmtContext(ctx, si.minNonRevertedHeightStmt).QueryRowContext(ctx) + row = tx.StmtContext(ctx, si.getMinNonRevertedHeightStmt).QueryRowContext(ctx) if err := row.Scan(&result); err != nil { return xerrors.Errorf("error finding boundary epoch: %w", err) } @@ -159,7 +159,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types } var exists bool - err = tx.StmtContext(ctx, si.tipsetExistsNotRevertedStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists) + err = tx.StmtContext(ctx, si.hasNonRevertedTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists) if err != nil { return xerrors.Errorf("error checking if tipset exists and is not reverted: %w", err) } @@ -186,7 +186,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types // mark all tipsets from the boundary epoch in the Index as reverted as they are not in the current canonical chain log.Infof("Marking tipsets as reverted from height %d", boundaryEpoch) - _, err := tx.StmtContext(ctx, 
si.revertTipsetsFromHeightStmt).ExecContext(ctx, int64(boundaryEpoch)) + _, err := tx.StmtContext(ctx, si.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(boundaryEpoch)) if err != nil { return xerrors.Errorf("error marking tipsets as reverted: %w", err) } @@ -247,17 +247,17 @@ func (si *SqliteIndexer) prepareStatements() error { if err != nil { return xerrors.Errorf("prepare %s: %w", "insertTipsetMessageStmt", err) } - si.tipsetExistsStmt, err = si.db.Prepare(stmtTipsetExists) + si.hasTipsetStmt, err = si.db.Prepare(stmtHasTipset) if err != nil { - return xerrors.Errorf("prepare %s: %w", "tipsetExistsStmt", err) + return xerrors.Errorf("prepare %s: %w", "hasTipsetStmt", err) } - si.tipsetUnRevertStmt, err = si.db.Prepare(stmtTipsetUnRevert) + si.updateTipsetToNonRevertedStmt, err = si.db.Prepare(stmtUpdateTipsetToNonReverted) if err != nil { - return xerrors.Errorf("prepare %s: %w", "tipsetUnRevertStmt", err) + return xerrors.Errorf("prepare %s: %w", "updateTipsetToNonRevertedStmt", err) } - si.revertTipsetStmt, err = si.db.Prepare(stmtRevertTipset) + si.updateTipsetToRevertedStmt, err = si.db.Prepare(stmtUpdateTipsetToReverted) if err != nil { - return xerrors.Errorf("prepare %s: %w", "revertTipsetStmt", err) + return xerrors.Errorf("prepare %s: %w", "updateTipsetToRevertedStmt", err) } si.getMaxNonRevertedTipsetStmt, err = si.db.Prepare(stmtGetMaxNonRevertedTipset) if err != nil { @@ -271,39 +271,36 @@ func (si *SqliteIndexer) prepareStatements() error { if err != nil { return xerrors.Errorf("prepare %s: %w", "removeTipsetsBeforeHeightStmt", err) } - si.deleteEthHashesOlderThanStmt, err = si.db.Prepare(stmtDeleteEthHashesOlderThan) + si.removeEthHashesOlderThanStmt, err = si.db.Prepare(stmtRemoveEthHashesOlderThan) if err != nil { - return xerrors.Errorf("prepare %s: %w", "deleteEthHashesOlderThanStmt", err) + return xerrors.Errorf("prepare %s: %w", "removeEthHashesOlderThanStmt", err) } - si.revertTipsetsFromHeightStmt, err = si.db.Prepare(stmtRevertTipsetsFromHeight) + si.updateTipsetsToRevertedFromHeightStmt, err = si.db.Prepare(stmtUpdateTipsetsToRevertedFromHeight) if err != nil { - return xerrors.Errorf("prepare %s: %w", "revertTipsetsFromHeightStmt", err) + return xerrors.Errorf("prepare %s: %w", "updateTipsetsToRevertedFromHeightStmt", err) } si.countMessagesStmt, err = si.db.Prepare(stmtCountMessages) if err != nil { return xerrors.Errorf("prepare %s: %w", "countMessagesStmt", err) } - si.minNonRevertedHeightStmt, err = si.db.Prepare(stmtMinNonRevertedHeight) + si.getMinNonRevertedHeightStmt, err = si.db.Prepare(stmtGetMinNonRevertedHeight) if err != nil { - return xerrors.Errorf("prepare %s: %w", "minNonRevertedHeightStmt", err) + return xerrors.Errorf("prepare %s: %w", "getMinNonRevertedHeightStmt", err) } - si.tipsetExistsNotRevertedStmt, err = si.db.Prepare(stmtTipsetExistsNotReverted) + si.hasNonRevertedTipsetStmt, err = si.db.Prepare(stmtHasNonRevertedTipset) if err != nil { - return xerrors.Errorf("prepare %s: %w", "tipsetExistsNotRevertedStmt", err) + return xerrors.Errorf("prepare %s: %w", "hasNonRevertedTipsetStmt", err) } - si.tipsetExistsNotRevertedStmt, err = si.db.Prepare(stmtTipsetExistsNotReverted) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "tipsetExistsNotRevertedStmt", err) - } - si.eventsUnRevertStmt, err = si.db.Prepare(stmtEventsUnRevert) + + si.updateEventsToNonRevertedStmt, err = si.db.Prepare(stmtUpdateEventsToNonReverted) if err != nil { - return xerrors.Errorf("prepare %s: %w", "eventsUnRevertStmt", err) + return 
xerrors.Errorf("prepare %s: %w", "updateEventsToNonRevertedStmt", err) } - si.revertEventsStmt, err = si.db.Prepare(stmtEventsRevert) + si.updateEventsToRevertedStmt, err = si.db.Prepare(stmtUpdateEventsToReverted) if err != nil { - return xerrors.Errorf("prepare %s: %w", "revertEventsStmt", err) + return xerrors.Errorf("prepare %s: %w", "updateEventsToRevertedStmt", err) } si.getMsgIdForMsgCidAndTipsetStmt, err = si.db.Prepare(stmtGetMsgIdForMsgCidAndTipset) @@ -424,13 +421,13 @@ func (si *SqliteIndexer) Revert(ctx context.Context, from, to *types.TipSet) err } err = withTx(ctx, si.db, func(tx *sql.Tx) error { - if _, err := tx.Stmt(si.revertTipsetStmt).ExecContext(ctx, revertTsKeyCid); err != nil { + if _, err := tx.Stmt(si.updateTipsetToRevertedStmt).ExecContext(ctx, revertTsKeyCid); err != nil { return xerrors.Errorf("error marking tipset %s as reverted: %w", revertTsKeyCid, err) } // events are indexed against the message inclusion tipset, not the message execution tipset. // So we need to revert the events for the message inclusion tipset. - if _, err := tx.Stmt(si.revertEventsStmt).ExecContext(ctx, eventTsKeyCid); err != nil { + if _, err := tx.Stmt(si.updateEventsToRevertedStmt).ExecContext(ctx, eventTsKeyCid); err != nil { return xerrors.Errorf("error reverting events for tipset %s: %w", eventTsKeyCid, err) } @@ -527,16 +524,16 @@ func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types. func (si *SqliteIndexer) restoreTipsetIfExists(ctx context.Context, tx *sql.Tx, tsKeyCidBytes []byte, parentsKeyCidBytes []byte) (bool, error) { // Check if the tipset already exists var exists bool - if err := tx.Stmt(si.tipsetExistsStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { + if err := tx.Stmt(si.hasTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { return false, xerrors.Errorf("error checking if tipset exists: %w", err) } if exists { - if _, err := tx.Stmt(si.tipsetUnRevertStmt).ExecContext(ctx, tsKeyCidBytes); err != nil { + if _, err := tx.Stmt(si.updateTipsetToNonRevertedStmt).ExecContext(ctx, tsKeyCidBytes); err != nil { return false, xerrors.Errorf("error restoring tipset: %w", err) } // also mark all the events in the parent as not reverted - if _, err := tx.Stmt(si.eventsUnRevertStmt).ExecContext(ctx, parentsKeyCidBytes); err != nil { + if _, err := tx.Stmt(si.updateEventsToNonRevertedStmt).ExecContext(ctx, parentsKeyCidBytes); err != nil { return false, xerrors.Errorf("error unreverting events: %w", err) } diff --git a/chainindex/read.go b/chainindex/read.go index 27acb557784..3162b47fc41 100644 --- a/chainindex/read.go +++ b/chainindex/read.go @@ -94,7 +94,7 @@ func (si *SqliteIndexer) queryMsgInfo(ctx context.Context, messageCid cid.Cid, t func (si *SqliteIndexer) isTipsetIndexed(ctx context.Context, tsKeyCid []byte) (bool, error) { var exists bool - err := si.tipsetExistsStmt.QueryRowContext(ctx, tsKeyCid).Scan(&exists) + err := si.hasTipsetStmt.QueryRowContext(ctx, tsKeyCid).Scan(&exists) if err != nil { return false, xerrors.Errorf("error checking if tipset exists: %w", err) } From 8ec7cd20beed73aa05ba5420d2ee620b1eb5884d Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 5 Sep 2024 12:39:48 +0400 Subject: [PATCH 14/66] address review for Index GC --- chainindex/gc.go | 25 +++++++++---------------- chainindex/indexer.go | 18 +++++++++--------- node/config/types.go | 4 ++-- node/modules/chainindex.go | 2 +- 4 files changed, 21 insertions(+), 28 deletions(-) diff --git a/chainindex/gc.go 
b/chainindex/gc.go index d4c852f6562..c7a5efdcf3b 100644 --- a/chainindex/gc.go +++ b/chainindex/gc.go @@ -7,6 +7,7 @@ import ( logging "github.com/ipfs/go-log/v2" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/policy" ) @@ -25,7 +26,7 @@ func (si *SqliteIndexer) gcLoop() { cleanupTicker := time.NewTicker(cleanupInterval) defer cleanupTicker.Stop() - for { + for si.ctx.Err() == nil { select { case <-cleanupTicker.C: si.cleanupRevertedTipsets(si.ctx) @@ -37,13 +38,14 @@ func (si *SqliteIndexer) gcLoop() { } func (si *SqliteIndexer) gc(ctx context.Context) { - if si.gcRetentionEpochs <= 0 { + if si.gcRetentionDays <= 0 { return } - head := si.cs.GetHeaviestTipSet().Height() - removeEpoch := int64(head) - si.gcRetentionEpochs + head := si.cs.GetHeaviestTipSet() + retentionEpochs := si.gcRetentionDays * builtin.EpochsInDay + removeEpoch := int64(head.Height()) - retentionEpochs - 10 // 10 is for some grace period if removeEpoch <= 0 { return } @@ -64,15 +66,7 @@ func (si *SqliteIndexer) gc(ctx context.Context) { // Also GC eth hashes - // Calculate the number of days - days := int((si.gcRetentionEpochs * 30) / (24 * 60 * 60)) - - // Ensure we have at least 1 day for GC - if days < 1 { - return - } - - res, err = si.removeEthHashesOlderThanStmt.Exec("-" + strconv.Itoa(days) + " day") + res, err = si.removeEthHashesOlderThanStmt.Exec("-" + strconv.Itoa(int(si.gcRetentionDays)) + " day") if err != nil { log.Errorw("failed to delete eth hashes older than", "error", err) return @@ -88,9 +82,8 @@ func (si *SqliteIndexer) gc(ctx context.Context) { } func (si *SqliteIndexer) cleanupRevertedTipsets(ctx context.Context) { - head := si.cs.GetHeaviestTipSet().Height() - - finalEpoch := (head - policy.ChainFinality) - 10 // 10 is for some grace period + head := si.cs.GetHeaviestTipSet() + finalEpoch := (head.Height() - policy.ChainFinality) - 10 // 10 is for some grace period if finalEpoch <= 0 { return } diff --git a/chainindex/indexer.go b/chainindex/indexer.go index a2177639619..8687334845a 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -53,7 +53,7 @@ type SqliteIndexer struct { insertEventStmt *sql.Stmt insertEventEntryStmt *sql.Stmt - gcRetentionEpochs int64 + gcRetentionDays int64 mu sync.Mutex updateSubs map[uint64]*updateSub @@ -63,7 +63,7 @@ type SqliteIndexer struct { closed bool } -func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64) (si *SqliteIndexer, err error) { +func NewSqliteIndexer(path string, cs ChainStore, gcRetentionDays int64) (si *SqliteIndexer, err error) { db, _, err := sqlite.Open(path) if err != nil { return nil, xerrors.Errorf("failed to setup message index db: %w", err) @@ -84,13 +84,13 @@ func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64) (si * } si = &SqliteIndexer{ - ctx: ctx, - cancel: cancel, - db: db, - cs: cs, - updateSubs: make(map[uint64]*updateSub), - subIdCounter: 0, - gcRetentionEpochs: gcRetentionEpochs, + ctx: ctx, + cancel: cancel, + db: db, + cs: cs, + updateSubs: make(map[uint64]*updateSub), + subIdCounter: 0, + gcRetentionDays: gcRetentionDays, } if err = si.prepareStatements(); err != nil { return nil, xerrors.Errorf("failed to prepare statements: %w", err) diff --git a/node/config/types.go b/node/config/types.go index 913c9daec85..13f35747f06 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -629,10 +629,10 @@ type IndexConfig struct { // EnableMsgIndex enables indexing of messages on chain. 
EnableMsgIndex bool - // GCRetentionEpochs defines the number of epochs for which data is retained in the Indexer. + // GCRetentionDays defines the number of days for which data is retained in the Indexer. // During the garbage collection (GC) process, data older than this retention period is pruned. // A value of 0 disables GC, retaining all historical data. - GCRetentionEpochs int64 + GCRetentionDays int64 } type HarmonyDB struct { diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index 29d8baef17c..f8bf71bdaed 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -29,7 +29,7 @@ func ChainIndexer(cfg config.IndexConfig) func(lc fx.Lifecycle, mctx helpers.Met } // TODO Implement config driven auto-backfilling - chainIndexer, err := chainindex.NewSqliteIndexer(filepath.Join(sqlitePath, chainindex.DefaultDbFilename), cs, cfg.GCRetentionEpochs) + chainIndexer, err := chainindex.NewSqliteIndexer(filepath.Join(sqlitePath, chainindex.DefaultDbFilename), cs, cfg.GCRetentionDays) if err != nil { return nil, err } From a1c5201a55615c889d7c57fa57138c780808c1af Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 5 Sep 2024 16:35:25 +0400 Subject: [PATCH 15/66] more changes as per review --- chainindex/ddls.go | 2 +- chainindex/events.go | 82 +++++++++++++++++++++++++++++++---------- chainindex/gc.go | 12 +++--- chainindex/helpers.go | 3 +- chainindex/indexer.go | 46 ++++++++++++----------- chainindex/interface.go | 1 + chainindex/read.go | 69 ++++++++++++++++++---------------- 7 files changed, 134 insertions(+), 81 deletions(-) diff --git a/chainindex/ddls.go b/chainindex/ddls.go index 02fdb23a0f1..501f8a57917 100644 --- a/chainindex/ddls.go +++ b/chainindex/ddls.go @@ -23,7 +23,7 @@ const ( stmtUpdateTipsetsToRevertedFromHeight = "UPDATE tipset_message SET reverted = 1 WHERE height >= ?" - stmtCountMessages = "SELECT COUNT(*) FROM tipset_message" + stmtIsTipsetMessageEmpty = "SELECT EXISTS(SELECT 1 FROM tipset_message LIMIT 1)" stmtGetMinNonRevertedHeight = `SELECT MIN(height) FROM tipset_message WHERE reverted = 0` diff --git a/chainindex/events.go b/chainindex/events.go index c8880398858..7e0ef78cd3b 100644 --- a/chainindex/events.go +++ b/chainindex/events.go @@ -181,6 +181,59 @@ func (si *SqliteIndexer) loadExecutedMessages(ctx context.Context, msgTs, rctTs return ems, nil } +// checkTipsetIndexedStatus verifies if a specific tipset is indexed based on the EventFilter. 
+// It returns nil if the tipset is indexed, ErrNotFound if it's not indexed or not specified, and any other error encountered during the check. +func (si *SqliteIndexer) checkTipsetIndexedStatus(ctx context.Context, f *EventFilter) error { + var tipsetKeyCid []byte + var err error + + // Determine the tipset to check based on the filter + switch { + case f.TipsetCid != cid.Undef: + tipsetKeyCid = f.TipsetCid.Bytes() + case f.MinHeight >= 0 && f.MinHeight == f.MaxHeight: + tipsetKeyCid, err = si.getTipsetKeyCidByHeight(ctx, f.MinHeight) + if err != nil { + return xerrors.Errorf("failed to get tipset key cid by height: %w", err) + } + default: + // Filter doesn't specify a specific tipset + return ErrNotFound + } + + // If we couldn't determine a specific tipset, return ErrNotFound + if tipsetKeyCid == nil { + return ErrNotFound + } + + // Check if the determined tipset is indexed + exists, err := si.isTipsetIndexed(ctx, tipsetKeyCid) + if err != nil { + return xerrors.Errorf("failed to check if tipset is indexed: %w", err) + } + + if exists { + return nil // Tipset is indexed + } + + return ErrNotFound // Tipset is not indexed +} + +// getTipsetKeyCidByHeight retrieves the tipset key CID for a given height. +// It returns nil if no tipset is found at the exact height. +func (si *SqliteIndexer) getTipsetKeyCidByHeight(ctx context.Context, height abi.ChainEpoch) ([]byte, error) { + ts, err := si.cs.GetTipsetByHeight(ctx, height, nil, false) + if err != nil { + return nil, xerrors.Errorf("failed to get tipset by height: %w", err) + } + + if ts.Height() != height { + return nil, nil // No tipset at exact height + } + + return toTipsetKeyCidBytes(ts) +} + // GetEventsForFilter returns matching events for the given filter // prefillFilter fills a filter's collection of events from the historic index // Returns nil, nil if the filter has no matching events @@ -201,31 +254,20 @@ func (si *SqliteIndexer) GetEventsForFilter(ctx context.Context, f *EventFilter, q, err := stmt.QueryContext(ctx, values...) if err == sql.ErrNoRows { - // wait for head to be indexed and retry - err = si.waitTillHeadIndexedAndApply(ctx, func() error { - q, err = stmt.QueryContext(ctx, values...) - return err - }) + // did not find events, but may be in head, so wait for it and check again + if err := si.waitTillHeadIndexed(ctx); err != nil { + return nil, xerrors.Errorf("error waiting for head to be indexed: %w", err) + } + q, err = stmt.QueryContext(ctx, values...) } if err != nil { + // if no rows are found, we should differentiate between no events for the tipset (which is valid and can happen) + // and the tipset not being indexed if errors.Is(err, sql.ErrNoRows) { - // if user is asking for a specific tipset, differentiate between no events for the tipset and the absence of the tipset itself - if f.TipsetCid != cid.Undef { - exists, err := si.isTipsetIndexed(ctx, f.TipsetCid.Bytes()) - if err != nil { - return nil, xerrors.Errorf("error checking if tipset exists: %w", err) - } - // we have the tipset indexed but simply dont have events for it i.e.
no events matching the given filter - if exists { - return nil, nil - } - } - - // we don't have the tipset indexed - return nil, ErrNotFound + return nil, si.checkTipsetIndexedStatus(ctx, f) } - return nil, xerrors.Errorf("exec prefill query: %w", err) + return nil, xerrors.Errorf("failed to query events: %w", err) } defer func() { _ = q.Close() }() diff --git a/chainindex/gc.go b/chainindex/gc.go index c7a5efdcf3b..f444b8625f4 100644 --- a/chainindex/gc.go +++ b/chainindex/gc.go @@ -45,14 +45,14 @@ func (si *SqliteIndexer) gc(ctx context.Context) { head := si.cs.GetHeaviestTipSet() retentionEpochs := si.gcRetentionDays * builtin.EpochsInDay - removeEpoch := int64(head.Height()) - retentionEpochs - 10 // 10 is for some grace period - if removeEpoch <= 0 { + removalEpoch := int64(head.Height()) - retentionEpochs - 10 // 10 is for some grace period + if removalEpoch <= 0 { return } - res, err := si.removeTipsetsBeforeHeightStmt.ExecContext(ctx, removeEpoch) + res, err := si.removeTipsetsBeforeHeightStmt.ExecContext(ctx, removalEpoch) if err != nil { - log.Errorw("failed to remove reverted tipsets before height", "height", removeEpoch, "error", err) + log.Errorw("failed to remove reverted tipsets before height", "height", removalEpoch, "error", err) return } @@ -62,7 +62,7 @@ func (si *SqliteIndexer) gc(ctx context.Context) { return } - log.Infow("gc'd tipsets", "height", removeEpoch, "nRows", rows) + log.Infow("gc'd tipsets", "height", removalEpoch, "nRows", rows) // Also GC eth hashes @@ -78,7 +78,7 @@ func (si *SqliteIndexer) gc(ctx context.Context) { return } - log.Infow("gc'd eth hashes", "height", removeEpoch, "nRows", rows) + log.Infow("gc'd eth hashes", "height", removalEpoch, "nRows", rows) } func (si *SqliteIndexer) cleanupRevertedTipsets(ctx context.Context) { diff --git a/chainindex/helpers.go b/chainindex/helpers.go index 15edf5e7d26..d9ec9b58b69 100644 --- a/chainindex/helpers.go +++ b/chainindex/helpers.go @@ -55,7 +55,8 @@ func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error } func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, indexer Indexer) { - for { + + for ctx.Err() == nil { select { case <-ctx.Done(): return diff --git a/chainindex/indexer.go b/chainindex/indexer.go index 8687334845a..03fe86aa251 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -44,7 +44,7 @@ type SqliteIndexer struct { removeTipsetsBeforeHeightStmt *sql.Stmt removeEthHashesOlderThanStmt *sql.Stmt updateTipsetsToRevertedFromHeightStmt *sql.Stmt - countMessagesStmt *sql.Stmt + isTipsetMessageEmptyStmt *sql.Stmt getMinNonRevertedHeightStmt *sql.Stmt hasNonRevertedTipsetStmt *sql.Stmt updateEventsToRevertedStmt *sql.Stmt @@ -131,19 +131,20 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types } return withTx(ctx, si.db, func(tx *sql.Tx) error { - row := tx.StmtContext(ctx, si.countMessagesStmt).QueryRowContext(ctx) - var result int64 - if err := row.Scan(&result); err != nil { - return xerrors.Errorf("error counting messages: %w", err) + var isEmpty bool + err := tx.StmtContext(ctx, si.isTipsetMessageEmptyStmt).QueryRowContext(ctx).Scan(&isEmpty) + if err != nil { + return xerrors.Errorf("failed to check if tipset message is empty: %w", err) } - if result == 0 { + if isEmpty { return nil } - // Find the minimum applied tipset in the index; this will mark the end of the reconciliation walk - row = tx.StmtContext(ctx, si.getMinNonRevertedHeightStmt).QueryRowContext(ctx) + // Find the minimum applied tipset in the 
index; this will mark the absolute min height of the reconciliation walk + var result int64 + row := tx.StmtContext(ctx, si.getMinNonRevertedHeightStmt).QueryRowContext(ctx) if err := row.Scan(&result); err != nil { - return xerrors.Errorf("error finding boundary epoch: %w", err) + return xerrors.Errorf("failed to find boundary epoch: %w", err) } boundaryEpoch := abi.ChainEpoch(result) @@ -155,13 +156,13 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types for curTs != nil && curTs.Height() >= boundaryEpoch { tsKeyCidBytes, err := toTipsetKeyCidBytes(curTs) if err != nil { - return xerrors.Errorf("error computing tipset cid: %w", err) + return xerrors.Errorf("failed to compute tipset cid: %w", err) } var exists bool err = tx.StmtContext(ctx, si.hasNonRevertedTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists) if err != nil { - return xerrors.Errorf("error checking if tipset exists and is not reverted: %w", err) + return xerrors.Errorf("failed to check if tipset exists and is not reverted: %w", err) } if exists { @@ -176,7 +177,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types parents := curTs.Parents() curTs, err = si.cs.GetTipSetFromKey(ctx, parents) if err != nil { - return xerrors.Errorf("error walking chain: %w", err) + return xerrors.Errorf("failed to walk chain: %w", err) } } @@ -186,9 +187,9 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types // mark all tipsets from the boundary epoch in the Index as reverted as they are not in the current canonical chain log.Infof("Marking tipsets as reverted from height %d", boundaryEpoch) - _, err := tx.StmtContext(ctx, si.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(boundaryEpoch)) + _, err = tx.StmtContext(ctx, si.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(boundaryEpoch)) if err != nil { - return xerrors.Errorf("error marking tipsets as reverted: %w", err) + return xerrors.Errorf("failed to mark tipsets as reverted: %w", err) } // Now apply all missing tipsets in reverse order i,e, we apply tipsets in [last matching tipset b/w index and canonical chain, @@ -197,10 +198,10 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types curTs := tipsetStack[i] parentTs, err := si.cs.GetTipSetFromKey(ctx, curTs.Parents()) if err != nil { - return xerrors.Errorf("error getting parent tipset: %w", err) + return xerrors.Errorf("failed to get parent tipset: %w", err) } if err := si.indexTipset(ctx, tx, curTs, parentTs, true); err != nil { - return xerrors.Errorf("error indexing tipset: %w", err) + return xerrors.Errorf("failed to index tipset: %w", err) } } @@ -279,10 +280,12 @@ func (si *SqliteIndexer) prepareStatements() error { if err != nil { return xerrors.Errorf("prepare %s: %w", "updateTipsetsToRevertedFromHeightStmt", err) } - si.countMessagesStmt, err = si.db.Prepare(stmtCountMessages) + + si.isTipsetMessageEmptyStmt, err = si.db.Prepare(stmtIsTipsetMessageEmpty) if err != nil { - return xerrors.Errorf("prepare %s: %w", "countMessagesStmt", err) + return xerrors.Errorf("prepare %s: %w", "isTipsetMessageEmptyStmt", err) } + si.getMinNonRevertedHeightStmt, err = si.db.Prepare(stmtGetMinNonRevertedHeight) if err != nil { return xerrors.Errorf("prepare %s: %w", "getMinNonRevertedHeightStmt", err) @@ -333,6 +336,9 @@ func (si *SqliteIndexer) IndexEthTxHash(ctx context.Context, txHash ethtypes.Eth } func (si *SqliteIndexer) IndexSignedMessage(ctx context.Context, msg 
*types.SignedMessage) error { + if msg.Signature.Type != crypto.SigTypeDelegated { + return nil + } si.closeLk.RLock() if si.closed { return ErrClosed @@ -345,10 +351,6 @@ func (si *SqliteIndexer) IndexSignedMessage(ctx context.Context, msg *types.Sign } func (si *SqliteIndexer) indexSignedMessage(ctx context.Context, tx *sql.Tx, msg *types.SignedMessage) error { - if msg.Signature.Type != crypto.SigTypeDelegated { - return nil - } - ethTx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(msg) if err != nil { return xerrors.Errorf("error converting filecoin message to eth tx: %w", err) diff --git a/chainindex/interface.go b/chainindex/interface.go index 0dcc44771a3..013a3650402 100644 --- a/chainindex/interface.go +++ b/chainindex/interface.go @@ -75,6 +75,7 @@ type ChainStore interface { GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) ActorStore(ctx context.Context) adt.Store + GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, ts *types.TipSet, prev bool) (*types.TipSet, error) } var _ ChainStore = (*store.ChainStore)(nil) diff --git a/chainindex/read.go b/chainindex/read.go index 3162b47fc41..d512ddace56 100644 --- a/chainindex/read.go +++ b/chainindex/read.go @@ -13,9 +13,7 @@ import ( "github.com/filecoin-project/lotus/chain/types/ethtypes" ) -var ( - headIndexedWaitTimeout = 5 * time.Second -) +const headIndexedWaitTimeout = 5 * time.Second func (si *SqliteIndexer) GetCidFromHash(ctx context.Context, txHash ethtypes.EthHash) (cid.Cid, error) { si.closeLk.RLock() @@ -26,18 +24,10 @@ func (si *SqliteIndexer) GetCidFromHash(ctx context.Context, txHash ethtypes.Eth var msgCidBytes []byte - err := si.queryMsgCidFromEthHash(ctx, txHash, &msgCidBytes) - if err == sql.ErrNoRows { - err = si.waitTillHeadIndexedAndApply(ctx, func() error { - return si.queryMsgCidFromEthHash(ctx, txHash, &msgCidBytes) - }) - } - - if err != nil { - if err == sql.ErrNoRows { - return cid.Undef, ErrNotFound - } - return cid.Undef, xerrors.Errorf("failed to get message CID from eth hash: %w", err) + if err := si.readWithHeadIndexWait(ctx, func() error { + return si.queryMsgCidFromEthHash(ctx, txHash, &msgCidBytes) + }); err != nil { + return cid.Undef, err } msgCid, err := cid.Cast(msgCidBytes) @@ -62,18 +52,10 @@ func (si *SqliteIndexer) GetMsgInfo(ctx context.Context, messageCid cid.Cid) (*M var tipsetKeyCidBytes []byte var height int64 - err := si.queryMsgInfo(ctx, messageCid, &tipsetKeyCidBytes, &height) - if err == sql.ErrNoRows { - err = si.waitTillHeadIndexedAndApply(ctx, func() error { - return si.queryMsgInfo(ctx, messageCid, &tipsetKeyCidBytes, &height) - }) - } - - if err != nil { - if err == sql.ErrNoRows { - return nil, ErrNotFound - } - return nil, xerrors.Errorf("failed to get message info: %w", err) + if err := si.readWithHeadIndexWait(ctx, func() error { + return si.queryMsgInfo(ctx, messageCid, &tipsetKeyCidBytes, &height) + }); err != nil { + return nil, err } tipsetKey, err := cid.Cast(tipsetKeyCidBytes) @@ -88,6 +70,30 @@ func (si *SqliteIndexer) GetMsgInfo(ctx context.Context, messageCid cid.Cid) (*M }, nil } +// This function attempts to read data using the provided readFunc. +// If the initial read returns no rows, it waits for the head to be indexed +// and tries again. This ensures that the most up-to-date data is checked. +// If no data is found after the second attempt, it returns ErrNotFound. 
+func (si *SqliteIndexer) readWithHeadIndexWait(ctx context.Context, readFunc func() error) error { + err := readFunc() + if err == sql.ErrNoRows { + if err := si.waitTillHeadIndexed(ctx); err != nil { + return xerrors.Errorf("error waiting for head to be indexed: %w", err) + } + // not found, but may be in latest head, so wait for it and check again + err = readFunc() + } + + if err != nil { + if err == sql.ErrNoRows { + return ErrNotFound + } + return xerrors.Errorf("failed to get message info: %w", err) + } + + return nil +} + func (si *SqliteIndexer) queryMsgInfo(ctx context.Context, messageCid cid.Cid, tipsetKeyCidBytes *[]byte, height *int64) error { return si.getNonRevertedMsgInfoStmt.QueryRowContext(ctx, messageCid.Bytes()).Scan(tipsetKeyCidBytes, height) } @@ -101,7 +107,7 @@ func (si *SqliteIndexer) isTipsetIndexed(ctx context.Context, tsKeyCid []byte) ( return exists, nil } -func (si *SqliteIndexer) waitTillHeadIndexedAndApply(ctx context.Context, applyFn func() error) error { +func (si *SqliteIndexer) waitTillHeadIndexed(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, headIndexedWaitTimeout) defer cancel() @@ -115,20 +121,20 @@ func (si *SqliteIndexer) waitTillHeadIndexedAndApply(ctx context.Context, applyF if exists, err := si.isTipsetIndexed(ctx, headTsKeyCidBytes); err != nil { return xerrors.Errorf("error checking if tipset exists: %w", err) } else if exists { - return applyFn() + return nil } // wait till it is indexed subCh, unsubFn := si.subscribeUpdates() defer unsubFn() - for { + for ctx.Err() == nil { exists, err := si.isTipsetIndexed(ctx, headTsKeyCidBytes) if err != nil { return xerrors.Errorf("error checking if tipset exists: %w", err) } if exists { - return applyFn() + return nil } select { @@ -138,4 +144,5 @@ func (si *SqliteIndexer) waitTillHeadIndexedAndApply(ctx context.Context, applyF return ctx.Err() } } + return ctx.Err() } From e0831eed87f068c8388335e76703ff9a27e4e446 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 5 Sep 2024 16:47:03 +0400 Subject: [PATCH 16/66] changes as per review --- chainindex/ddls.go | 2 +- chainindex/indexer.go | 18 ++++++++---------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/chainindex/ddls.go b/chainindex/ddls.go index 501f8a57917..7aa06957de3 100644 --- a/chainindex/ddls.go +++ b/chainindex/ddls.go @@ -23,7 +23,7 @@ const ( stmtUpdateTipsetsToRevertedFromHeight = "UPDATE tipset_message SET reverted = 1 WHERE height >= ?" 
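The rename that follows only corrects the identifier (the statement reports whether the table is non-empty); the SQL itself is unchanged. As a minimal, self-contained sketch of the pattern it relies on — SQLite evaluates EXISTS(SELECT 1 ... LIMIT 1) to 0 or 1, which database/sql scans straight into a Go bool — with the modernc.org/sqlite driver assumed purely for illustration (Lotus wires its own driver through lib/sqlite):

package main

import (
	"database/sql"
	"fmt"

	_ "modernc.org/sqlite"
)

func main() {
	db, err := sql.Open("sqlite", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE tipset_message (id INTEGER PRIMARY KEY)`); err != nil {
		panic(err)
	}

	var nonEmpty bool
	// Same shape as stmtIsTipsetMessageNonEmpty: a cheap emptiness probe instead of a full COUNT(*).
	if err := db.QueryRow(`SELECT EXISTS(SELECT 1 FROM tipset_message LIMIT 1)`).Scan(&nonEmpty); err != nil {
		panic(err)
	}
	fmt.Println("non-empty:", nonEmpty) // false for a freshly created table
}
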
- stmtIsTipsetMessageEmpty = "SELECT EXISTS(SELECT 1 FROM tipset_message LIMIT 1)" + stmtIsTipsetMessageNonEmpty = "SELECT EXISTS(SELECT 1 FROM tipset_message LIMIT 1)" stmtGetMinNonRevertedHeight = `SELECT MIN(height) FROM tipset_message WHERE reverted = 0` diff --git a/chainindex/indexer.go b/chainindex/indexer.go index 03fe86aa251..fdcf702783d 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -44,7 +44,7 @@ type SqliteIndexer struct { removeTipsetsBeforeHeightStmt *sql.Stmt removeEthHashesOlderThanStmt *sql.Stmt updateTipsetsToRevertedFromHeightStmt *sql.Stmt - isTipsetMessageEmptyStmt *sql.Stmt + isTipsetMessageNonEmptyStmt *sql.Stmt getMinNonRevertedHeightStmt *sql.Stmt hasNonRevertedTipsetStmt *sql.Stmt updateEventsToRevertedStmt *sql.Stmt @@ -131,12 +131,12 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types } return withTx(ctx, si.db, func(tx *sql.Tx) error { - var isEmpty bool - err := tx.StmtContext(ctx, si.isTipsetMessageEmptyStmt).QueryRowContext(ctx).Scan(&isEmpty) + var hasTipset bool + err := tx.StmtContext(ctx, si.isTipsetMessageNonEmptyStmt).QueryRowContext(ctx).Scan(&hasTipset) if err != nil { return xerrors.Errorf("failed to check if tipset message is empty: %w", err) } - if isEmpty { + if !hasTipset { return nil } @@ -281,9 +281,9 @@ func (si *SqliteIndexer) prepareStatements() error { return xerrors.Errorf("prepare %s: %w", "updateTipsetsToRevertedFromHeightStmt", err) } - si.isTipsetMessageEmptyStmt, err = si.db.Prepare(stmtIsTipsetMessageEmpty) + si.isTipsetMessageNonEmptyStmt, err = si.db.Prepare(stmtIsTipsetMessageNonEmpty) if err != nil { - return xerrors.Errorf("prepare %s: %w", "isTipsetMessageEmptyStmt", err) + return xerrors.Errorf("prepare %s: %w", "isTipsetMessageNonEmptyStmt", err) } si.getMinNonRevertedHeightStmt, err = si.db.Prepare(stmtGetMinNonRevertedHeight) @@ -456,11 +456,9 @@ func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types. 
} parentsKeyCidBytes := parentsKeyCid.Bytes() - restored, err := si.restoreTipsetIfExists(ctx, tx, tsKeyCidBytes, parentsKeyCidBytes) - if err != nil { + if restored, err := si.restoreTipsetIfExists(ctx, tx, tsKeyCidBytes, parentsKeyCidBytes); err != nil { return xerrors.Errorf("error restoring tipset: %w", err) - } - if restored { + } else if restored { return nil } From 06ca87a4b55070361a7353a26edaa6871b6926ed Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 5 Sep 2024 17:54:06 +0400 Subject: [PATCH 17/66] fix config --- chainindex/helpers.go | 2 +- chainindex/indexer.go | 47 +++++++++++++--------- documentation/en/default-lotus-config.toml | 37 ++++------------- node/builder_chain.go | 6 ++- node/config/def.go | 6 ++- node/config/doc_gen.go | 31 ++++---------- node/config/types.go | 29 ++++++------- node/modules/chainindex.go | 9 ++++- 8 files changed, 72 insertions(+), 95 deletions(-) diff --git a/chainindex/helpers.go b/chainindex/helpers.go index d9ec9b58b69..469386968e6 100644 --- a/chainindex/helpers.go +++ b/chainindex/helpers.go @@ -19,7 +19,7 @@ func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error } } - si, err := NewSqliteIndexer(path, cs, 0) + si, err := NewSqliteIndexer(path, cs, 0, false) if err != nil { return xerrors.Errorf("failed to create sqlite indexer: %w", err) } diff --git a/chainindex/indexer.go b/chainindex/indexer.go index fdcf702783d..3a055ba9ba5 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -53,7 +53,8 @@ type SqliteIndexer struct { insertEventStmt *sql.Stmt insertEventEntryStmt *sql.Stmt - gcRetentionDays int64 + gcRetentionDays int64 + reconcileEmptyIndex bool mu sync.Mutex updateSubs map[uint64]*updateSub @@ -63,7 +64,7 @@ type SqliteIndexer struct { closed bool } -func NewSqliteIndexer(path string, cs ChainStore, gcRetentionDays int64) (si *SqliteIndexer, err error) { +func NewSqliteIndexer(path string, cs ChainStore, gcRetentionDays int64, reconcileEmptyIndex bool) (si *SqliteIndexer, err error) { db, _, err := sqlite.Open(path) if err != nil { return nil, xerrors.Errorf("failed to setup message index db: %w", err) @@ -84,13 +85,14 @@ func NewSqliteIndexer(path string, cs ChainStore, gcRetentionDays int64) (si *Sq } si = &SqliteIndexer{ - ctx: ctx, - cancel: cancel, - db: db, - cs: cs, - updateSubs: make(map[uint64]*updateSub), - subIdCounter: 0, - gcRetentionDays: gcRetentionDays, + ctx: ctx, + cancel: cancel, + db: db, + cs: cs, + updateSubs: make(map[uint64]*updateSub), + subIdCounter: 0, + gcRetentionDays: gcRetentionDays, + reconcileEmptyIndex: reconcileEmptyIndex, } if err = si.prepareStatements(); err != nil { return nil, xerrors.Errorf("failed to prepare statements: %w", err) @@ -119,14 +121,14 @@ func (si *SqliteIndexer) SetIdToRobustAddrFunc(idToRobustAddrFunc IdToRobustAddr // // This function is crucial for maintaining index integrity, especially after chain reorgs. // It ensures that the index accurately reflects the current state of the blockchain. 
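As a toy model of the walk those comments describe — every identifier and the slice/map shapes below are invented for illustration, not taken from the Lotus code — reconciliation boils down to: walk the canonical chain back from head until the index has a non-reverted match, mark everything the index holds at or above that epoch as reverted, then re-apply the missing tipsets oldest-first. The real implementation in the hunks below additionally bounds the walk by the minimum non-reverted height in the index, and later patches in this series cap it with MaxReconcileTipsets and revert the associated events as well.

package main

import "fmt"

type entry struct {
	height   int
	id       string
	reverted bool
}

func main() {
	// Canonical chain, newest first. "c3".."c5" replaced a fork "a3","a4" that the index still holds.
	canonical := []entry{{5, "c5", false}, {4, "c4", false}, {3, "c3", false}, {2, "b2", false}, {1, "b1", false}, {0, "b0", false}}

	// What the index currently believes (keyed by tipset id).
	index := map[string]*entry{
		"b0": {0, "b0", false}, "b1": {1, "b1", false}, "b2": {2, "b2", false},
		"a3": {3, "a3", false}, "a4": {4, "a4", false},
	}

	// 1. Walk the canonical chain back from head until the index has a non-reverted match.
	var missing []entry
	reconciliationEpoch := 0
	for _, ts := range canonical {
		if e, ok := index[ts.id]; ok && !e.reverted {
			reconciliationEpoch = ts.height + 1 // found the last common tipset
			break
		}
		missing = append(missing, ts)
	}

	// 2. Everything the index holds at or above that epoch is stale: mark it reverted.
	for _, e := range index {
		if e.height >= reconciliationEpoch {
			e.reverted = true
		}
	}

	// 3. Re-apply the missing canonical tipsets, oldest first.
	for i := len(missing) - 1; i >= 0; i-- {
		ts := missing[i]
		index[ts.id] = &entry{ts.height, ts.id, false}
	}

	fmt.Println("reconciliation epoch:", reconciliationEpoch) // 3
	for id, e := range index {
		fmt.Printf("%s height=%d reverted=%v\n", id, e.height, e.reverted)
	}
}
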
-func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types.TipSet) error { +func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.TipSet) error { si.closeLk.RLock() if si.closed { return ErrClosed } si.closeLk.RUnlock() - if currHead == nil { + if head == nil { return nil } @@ -147,13 +149,18 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types return xerrors.Errorf("failed to find boundary epoch: %w", err) } - boundaryEpoch := abi.ChainEpoch(result) + reconciliationEpoch := abi.ChainEpoch(result) var tipsetStack []*types.TipSet - curTs := currHead - log.Infof("Starting chain reconciliation from height %d", currHead.Height()) - for curTs != nil && curTs.Height() >= boundaryEpoch { + curTs := head + log.Infof("Starting chain reconciliation from height %d", head.Height()) + + // The goal here is to walk the canonical chain backwards from head until we find a matching non-reverted tipset + // in the db so we know where to start reconciliation from + // All tipsets that exist in the DB but not in the canonical chain are then marked as reverted + // All tpsets that exist in the canonical chain but not in the db are then applied + for curTs != nil && curTs.Height() >= reconciliationEpoch { tsKeyCidBytes, err := toTipsetKeyCidBytes(curTs) if err != nil { return xerrors.Errorf("failed to compute tipset cid: %w", err) @@ -167,8 +174,8 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types if exists { // found it! - boundaryEpoch = curTs.Height() + 1 - log.Infof("Found matching tipset at height %d, setting boundary epoch to %d", curTs.Height(), boundaryEpoch) + reconciliationEpoch = curTs.Height() + 1 + log.Infof("Found matching tipset at height %d, setting reconciliation epoch to %d", curTs.Height(), reconciliationEpoch) break } tipsetStack = append(tipsetStack, curTs) @@ -185,9 +192,9 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, currHead *types log.Warn("ReconcileWithChain reached genesis without finding matching tipset") } - // mark all tipsets from the boundary epoch in the Index as reverted as they are not in the current canonical chain - log.Infof("Marking tipsets as reverted from height %d", boundaryEpoch) - _, err = tx.StmtContext(ctx, si.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(boundaryEpoch)) + // mark all tipsets from the reconciliation epoch onwards in the Index as reverted as they are not in the current canonical chain + log.Infof("Marking tipsets as reverted from height %d", reconciliationEpoch) + _, err = tx.StmtContext(ctx, si.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch)) if err != nil { return xerrors.Errorf("failed to mark tipsets as reverted: %w", err) } diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index bef6fc66103..49eff4197ba 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -253,14 +253,6 @@ # env var: LOTUS_EVENTS_DISABLEREALTIMEFILTERAPI #DisableRealTimeFilterAPI = false - # DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events - # that occurred in the past. HistoricFilterAPI maintains a queryable index of events. - # The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. 
- # - # type: bool - # env var: LOTUS_EVENTS_DISABLEHISTORICFILTERAPI - #DisableHistoricFilterAPI = false - # EnableActorEventsAPI enables the Actor events API that enables clients to consume events # emitted by (smart contracts + built-in Actors). # This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be @@ -300,31 +292,16 @@ # env var: LOTUS_EVENTS_MAXFILTERHEIGHTRANGE #MaxFilterHeightRange = 2880 - # DatabasePath is the full path to a sqlite database that will be used to index actor events to - # support the historic filter APIs. If the database does not exist it will be created. The directory containing - # the database must already exist and be writeable. If a relative path is provided here, sqlite treats it as - # relative to the CWD (current working directory). - # - # type: string - # env var: LOTUS_EVENTS_DATABASEPATH - #DatabasePath = "" +[ChainIndexer] + # env var: LOTUS_CHAININDEXER_DISABLECHAININDEXER + #DisableChainIndexer = false -[Index] - # EXPERIMENTAL FEATURE. USE WITH CAUTION - # EnableMsgIndex enables indexing of messages on chain. - # - # type: bool - # env var: LOTUS_INDEX_ENABLEMSGINDEX - #EnableMsgIndex = false + # env var: LOTUS_CHAININDEXER_GCRETENTIONDAYS + #GCRetentionDays = 0 - # GCRetentionEpochs defines the number of epochs for which data is retained in the Indexer. - # During the garbage collection (GC) process, data older than this retention period is pruned. - # A value of 0 disables GC, retaining all historical data. - # - # type: int64 - # env var: LOTUS_INDEX_GCRETENTIONEPOCHS - #GCRetentionEpochs = 0 + # env var: LOTUS_CHAININDEXER_RECONCILEEMPTYINDEX + #ReconcileEmptyIndex = false [FaultReporter] diff --git a/node/builder_chain.go b/node/builder_chain.go index 037c56af0ad..a6e80ff7fa1 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -282,8 +282,10 @@ func ConfigFullNode(c interface{}) Option { Override(ConsensusReporterKey, modules.RunConsensusFaultReporter(cfg.FaultReporter)), ), - Override(new(chainindex.Indexer), modules.ChainIndexer(cfg.Index)), - Override(InitChainIndexerKey, modules.InitChainIndexer), + Override(new(chainindex.Indexer), modules.ChainIndexer(cfg.ChainIndexer)), + If(!cfg.ChainIndexer.DisableChainIndexer, + Override(InitChainIndexerKey, modules.InitChainIndexer), + ), ) } diff --git a/node/config/def.go b/node/config/def.go index 527d72aa04c..d8c1d4ab3a6 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -89,13 +89,17 @@ func DefaultFullNode() *FullNode { }, Events: EventsConfig{ DisableRealTimeFilterAPI: false, - DisableHistoricFilterAPI: false, EnableActorEventsAPI: false, FilterTTL: Duration(time.Hour * 1), MaxFilters: 100, MaxFilterResults: 10000, MaxFilterHeightRange: 2880, // conservative limit of one day }, + ChainIndexer: ChainIndexerConfig{ + DisableChainIndexer: false, + GCRetentionDays: 0, + ReconcileEmptyIndex: false, + }, } } diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 8f91eee830e..aded56f986b 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -119,14 +119,6 @@ your node if metadata log is disabled`, Type: "bool", Comment: `DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. 
-The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, - }, - { - Name: "DisableHistoricFilterAPI", - Type: "bool", - - Comment: `DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events -that occurred in the past. HistoricFilterAPI maintains a queryable index of events. The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, }, { @@ -168,15 +160,6 @@ of filters per connection.`, Comment: `MaxFilterHeightRange specifies the maximum range of heights that can be used in a filter (to avoid querying the entire chain)`, }, - { - Name: "DatabasePath", - Type: "string", - - Comment: `DatabasePath is the full path to a sqlite database that will be used to index actor events to -support the historic filter APIs. If the database does not exist it will be created. The directory containing -the database must already exist and be writeable. If a relative path is provided here, sqlite treats it as -relative to the CWD (current working directory).`, - }, }, "FaultReporterConfig": { { @@ -337,19 +320,21 @@ in a cluster. Only 1 is required`, }, "IndexConfig": { { - Name: "EnableMsgIndex", + Name: "DisableChainIndexer", Type: "bool", - Comment: `EXPERIMENTAL FEATURE. USE WITH CAUTION -EnableMsgIndex enables indexing of messages on chain.`, + Comment: `DisableChainIndexer disables the chain indexer which indexes tipsets, messages and events from chain state. +Ideally, this should always be set to false as the Indexer is a crucial component for faster Lotus RPC responses. +Only turn it off if you know what you are doing.`, }, { - Name: "GCRetentionEpochs", + Name: "GCRetentionDays", Type: "int64", - Comment: `GCRetentionEpochs defines the number of epochs for which data is retained in the Indexer. + Comment: `GCRetentionDays defines the number of days for which data is retained in the Indexer. During the garbage collection (GC) process, data older than this retention period is pruned. -A value of 0 disables GC, retaining all historical data.`, +A value of 0 disables GC, retaining all historical data. +Default is 0 i.e. GC is disabled by default.`, }, }, "JournalConfig": { diff --git a/node/config/types.go b/node/config/types.go index 13f35747f06..804a2617cb1 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -25,7 +25,7 @@ type FullNode struct { Chainstore Chainstore Fevm FevmConfig Events EventsConfig - Index IndexConfig + ChainIndexer ChainIndexerConfig FaultReporter FaultReporterConfig } @@ -583,11 +583,6 @@ type EventsConfig struct { // The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. DisableRealTimeFilterAPI bool - // DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events - // that occurred in the past. HistoricFilterAPI maintains a queryable index of events. - // The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. - DisableHistoricFilterAPI bool - // EnableActorEventsAPI enables the Actor events API that enables clients to consume events // emitted by (smart contracts + built-in Actors). 
// This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be @@ -612,27 +607,29 @@ type EventsConfig struct { // the entire chain) MaxFilterHeightRange uint64 - // DatabasePath is the full path to a sqlite database that will be used to index actor events to - // support the historic filter APIs. If the database does not exist it will be created. The directory containing - // the database must already exist and be writeable. If a relative path is provided here, sqlite treats it as - // relative to the CWD (current working directory). - DatabasePath string - // Others, not implemented yet: // Set a limit on the number of active websocket subscriptions (may be zero) // Set a timeout for subscription clients // Set upper bound on index size } -type IndexConfig struct { - // EXPERIMENTAL FEATURE. USE WITH CAUTION - // EnableMsgIndex enables indexing of messages on chain. - EnableMsgIndex bool +type ChainIndexerConfig struct { + // DisableChainIndexer disables the chain indexer which indexes tipsets, messages and events from chain state. + // Ideally, this should always be set to false as the Indexer is a crucial component for faster Lotus RPC responses. + // Only turn it off if you know what you are doing. + DisableChainIndexer bool // GCRetentionDays defines the number of days for which data is retained in the Indexer. // During the garbage collection (GC) process, data older than this retention period is pruned. // A value of 0 disables GC, retaining all historical data. + // Default is 0 i.e. GC is disabled by default. GCRetentionDays int64 + + // ReconcileEmptyIndex reconciles the index with the chain state even if the Index is empty. + // This is useful when the indexer is not running for a long time and the chain has progressed. + // This will cause the indexer to re-index the entire chain state available on the node. + // Defaults to false. 
+ ReconcileEmptyIndex bool } type HarmonyDB struct { diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index f8bf71bdaed..e4519c6bece 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -21,15 +21,20 @@ import ( "github.com/filecoin-project/lotus/node/repo" ) -func ChainIndexer(cfg config.IndexConfig) func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (chainindex.Indexer, error) { +func ChainIndexer(cfg config.ChainIndexerConfig) func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (chainindex.Indexer, error) { return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (chainindex.Indexer, error) { + if cfg.DisableChainIndexer { + return nil, nil + } + sqlitePath, err := r.SqlitePath() if err != nil { return nil, err } // TODO Implement config driven auto-backfilling - chainIndexer, err := chainindex.NewSqliteIndexer(filepath.Join(sqlitePath, chainindex.DefaultDbFilename), cs, cfg.GCRetentionDays) + chainIndexer, err := chainindex.NewSqliteIndexer(filepath.Join(sqlitePath, chainindex.DefaultDbFilename), + cs, cfg.GCRetentionDays, cfg.ReconcileEmptyIndex) if err != nil { return nil, err } From c30079e50e4b01155e40ca57451909998ea290e9 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 5 Sep 2024 18:19:07 +0400 Subject: [PATCH 18/66] mark events as reverted during reconciliation --- chainindex/ddls.go | 2 ++ chainindex/indexer.go | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/chainindex/ddls.go b/chainindex/ddls.go index 7aa06957de3..81173465e15 100644 --- a/chainindex/ddls.go +++ b/chainindex/ddls.go @@ -23,6 +23,8 @@ const ( stmtUpdateTipsetsToRevertedFromHeight = "UPDATE tipset_message SET reverted = 1 WHERE height >= ?" 
+ stmtUpdateEventsToRevertedFromHeight = "UPDATE event SET reverted = 1 WHERE message_id IN (SELECT message_id FROM tipset_message WHERE height >= ?)" + stmtIsTipsetMessageNonEmpty = "SELECT EXISTS(SELECT 1 FROM tipset_message LIMIT 1)" stmtGetMinNonRevertedHeight = `SELECT MIN(height) FROM tipset_message WHERE reverted = 0` diff --git a/chainindex/indexer.go b/chainindex/indexer.go index 3a055ba9ba5..561adf01248 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -44,6 +44,7 @@ type SqliteIndexer struct { removeTipsetsBeforeHeightStmt *sql.Stmt removeEthHashesOlderThanStmt *sql.Stmt updateTipsetsToRevertedFromHeightStmt *sql.Stmt + updateEventsToRevertedFromHeightStmt *sql.Stmt isTipsetMessageNonEmptyStmt *sql.Stmt getMinNonRevertedHeightStmt *sql.Stmt hasNonRevertedTipsetStmt *sql.Stmt @@ -178,6 +179,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip log.Infof("Found matching tipset at height %d, setting reconciliation epoch to %d", curTs.Height(), reconciliationEpoch) break } + tipsetStack = append(tipsetStack, curTs) // walk up @@ -198,6 +200,11 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip if err != nil { return xerrors.Errorf("failed to mark tipsets as reverted: %w", err) } + // also need to mark events as reverted for the corresponding inclusion tipsets + _, err = tx.StmtContext(ctx, si.updateEventsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch-1)) + if err != nil { + return xerrors.Errorf("failed to mark events as reverted: %w", err) + } // Now apply all missing tipsets in reverse order i,e, we apply tipsets in [last matching tipset b/w index and canonical chain, // current chain head] @@ -288,6 +295,11 @@ func (si *SqliteIndexer) prepareStatements() error { return xerrors.Errorf("prepare %s: %w", "updateTipsetsToRevertedFromHeightStmt", err) } + si.updateEventsToRevertedFromHeightStmt, err = si.db.Prepare(stmtUpdateEventsToRevertedFromHeight) + if err != nil { + return xerrors.Errorf("prepare %s: %w", "updateEventsToRevertedFromHeightStmt", err) + } + si.isTipsetMessageNonEmptyStmt, err = si.db.Prepare(stmtIsTipsetMessageNonEmpty) if err != nil { return xerrors.Errorf("prepare %s: %w", "isTipsetMessageNonEmptyStmt", err) From 01d78e3a6ce2b74e4ef0f5bdd350aeb64e7982ba Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 6 Sep 2024 14:30:40 +0400 Subject: [PATCH 19/66] better reconciliation; pens down and code complete; also reconcile events --- chainindex/events.go | 1 + chainindex/helpers.go | 33 +++-- chainindex/indexer.go | 160 ++++++++++++--------- documentation/en/default-lotus-config.toml | 25 ++++ itests/kit/node_opts.go | 2 + node/config/def.go | 1 + node/config/doc_gen.go | 59 +++++--- node/config/types.go | 5 + node/modules/chainindex.go | 2 +- 9 files changed, 182 insertions(+), 106 deletions(-) diff --git a/chainindex/events.go b/chainindex/events.go index 7e0ef78cd3b..5ef8552a22b 100644 --- a/chainindex/events.go +++ b/chainindex/events.go @@ -32,6 +32,7 @@ type executedMessage struct { // events are indexed against their inclusion/message tipset when we get the corresponding execution tipset func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *types.TipSet, executionTs *types.TipSet) error { // check if we have an event indexed for any message in the `msgTs` tipset -> if so, there's nothig to do here + // this makes event inserts idempotent msgTsKeyCidBytes, err := toTipsetKeyCidBytes(msgTs) if err != nil { return 
xerrors.Errorf("error getting tipset key cid: %w", err) diff --git a/chainindex/helpers.go b/chainindex/helpers.go index 469386968e6..76dedf93589 100644 --- a/chainindex/helpers.go +++ b/chainindex/helpers.go @@ -12,45 +12,48 @@ import ( ) func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error { - // if a database already exists, we try to delete it and create a new one + // Check if a database already exists and attempt to delete it if _, err := os.Stat(path); err == nil { if err = os.Remove(path); err != nil { - return xerrors.Errorf("chainindex already exists at %s and can't be deleted", path) + return xerrors.Errorf("failed to delete existing chainindex at %s: %w", path, err) } } - si, err := NewSqliteIndexer(path, cs, 0, false) + si, err := NewSqliteIndexer(path, cs, 0, false, 0) if err != nil { return xerrors.Errorf("failed to create sqlite indexer: %w", err) } defer func() { - _ = si.Close() + if closeErr := si.Close(); closeErr != nil { + log.Errorf("failed to close sqlite indexer: %v", closeErr) + } }() err = withTx(ctx, si.db, func(tx *sql.Tx) error { - curTs := cs.GetHeaviestTipSet() - startHeight := curTs.Height() + head := cs.GetHeaviestTipSet() + startHeight := head.Height() + curTs := head + log.Infof("starting index hydration from snapshot at height %d", startHeight) for curTs != nil { - parentTs, err := cs.GetTipSetFromKey(ctx, curTs.Parents()) - if err != nil { - return xerrors.Errorf("error getting parent tipset: %w", err) - } - - if err := si.indexTipset(ctx, tx, curTs, parentTs, false); err != nil { - log.Infof("stopping import after %d tipsets", startHeight-curTs.Height()) + if err := si.indexTipset(ctx, tx, curTs); err != nil { + log.Infof("stopping import after %d tipsets with final error: %s", startHeight-curTs.Height(), err) break } - curTs = parentTs + curTs, err = cs.GetTipSetFromKey(ctx, curTs.Parents()) + if err != nil { + return xerrors.Errorf("failed to get parent tipset: %w", err) + } } return nil }) if err != nil { - return xerrors.Errorf("failed populate from snapshot: %w", err) + return xerrors.Errorf("failed to populate from snapshot: %w", err) } + log.Infof("Successfully populated chainindex from snapshot") return nil } diff --git a/chainindex/indexer.go b/chainindex/indexer.go index 561adf01248..b89dc54196e 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -56,6 +56,7 @@ type SqliteIndexer struct { gcRetentionDays int64 reconcileEmptyIndex bool + maxReconcileTipsets int mu sync.Mutex updateSubs map[uint64]*updateSub @@ -65,7 +66,8 @@ type SqliteIndexer struct { closed bool } -func NewSqliteIndexer(path string, cs ChainStore, gcRetentionDays int64, reconcileEmptyIndex bool) (si *SqliteIndexer, err error) { +func NewSqliteIndexer(path string, cs ChainStore, gcRetentionDays int64, reconcileEmptyIndex bool, + maxReconcileTipsets int) (si *SqliteIndexer, err error) { db, _, err := sqlite.Open(path) if err != nil { return nil, xerrors.Errorf("failed to setup message index db: %w", err) @@ -94,6 +96,7 @@ func NewSqliteIndexer(path string, cs ChainStore, gcRetentionDays int64, reconci subIdCounter: 0, gcRetentionDays: gcRetentionDays, reconcileEmptyIndex: reconcileEmptyIndex, + maxReconcileTipsets: maxReconcileTipsets, } if err = si.prepareStatements(); err != nil { return nil, xerrors.Errorf("failed to prepare statements: %w", err) @@ -139,30 +142,35 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip if err != nil { return xerrors.Errorf("failed to check if tipset message is empty: 
%w", err) } - if !hasTipset { + + isIndexEmpty := !hasTipset + if isIndexEmpty && !si.reconcileEmptyIndex { return nil } // Find the minimum applied tipset in the index; this will mark the absolute min height of the reconciliation walk - var result int64 - row := tx.StmtContext(ctx, si.getMinNonRevertedHeightStmt).QueryRowContext(ctx) - if err := row.Scan(&result); err != nil { - return xerrors.Errorf("failed to find boundary epoch: %w", err) + var reconciliationEpoch abi.ChainEpoch + if isIndexEmpty { + reconciliationEpoch = 0 + } else { + var result int64 + row := tx.StmtContext(ctx, si.getMinNonRevertedHeightStmt).QueryRowContext(ctx) + if err := row.Scan(&result); err != nil { + return xerrors.Errorf("failed to scan minimum non-reverted height %w", err) + } + reconciliationEpoch = abi.ChainEpoch(result) } - reconciliationEpoch := abi.ChainEpoch(result) - - var tipsetStack []*types.TipSet - - curTs := head + currTs := head log.Infof("Starting chain reconciliation from height %d", head.Height()) + var missingTipsets []*types.TipSet // The goal here is to walk the canonical chain backwards from head until we find a matching non-reverted tipset // in the db so we know where to start reconciliation from // All tipsets that exist in the DB but not in the canonical chain are then marked as reverted // All tpsets that exist in the canonical chain but not in the db are then applied - for curTs != nil && curTs.Height() >= reconciliationEpoch { - tsKeyCidBytes, err := toTipsetKeyCidBytes(curTs) + for currTs != nil && currTs.Height() >= reconciliationEpoch { + tsKeyCidBytes, err := toTipsetKeyCidBytes(currTs) if err != nil { return xerrors.Errorf("failed to compute tipset cid: %w", err) } @@ -175,50 +183,72 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip if exists { // found it! 
- reconciliationEpoch = curTs.Height() + 1 - log.Infof("Found matching tipset at height %d, setting reconciliation epoch to %d", curTs.Height(), reconciliationEpoch) + reconciliationEpoch = currTs.Height() + 1 + log.Infof("Found matching tipset at height %d, setting reconciliation epoch to %d", currTs.Height(), reconciliationEpoch) break } - tipsetStack = append(tipsetStack, curTs) + if len(missingTipsets) <= si.maxReconcileTipsets { + missingTipsets = append(missingTipsets, currTs) + } + + if currTs.Height() == 0 { + break + } - // walk up - parents := curTs.Parents() - curTs, err = si.cs.GetTipSetFromKey(ctx, parents) + parents := currTs.Parents() + currTs, err = si.cs.GetTipSetFromKey(ctx, parents) if err != nil { return xerrors.Errorf("failed to walk chain: %w", err) } } - if curTs == nil { + if currTs == nil { log.Warn("ReconcileWithChain reached genesis without finding matching tipset") } // mark all tipsets from the reconciliation epoch onwards in the Index as reverted as they are not in the current canonical chain log.Infof("Marking tipsets as reverted from height %d", reconciliationEpoch) - _, err = tx.StmtContext(ctx, si.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch)) - if err != nil { + if _, err = tx.StmtContext(ctx, si.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch)); err != nil { return xerrors.Errorf("failed to mark tipsets as reverted: %w", err) } + // also need to mark events as reverted for the corresponding inclusion tipsets - _, err = tx.StmtContext(ctx, si.updateEventsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch-1)) - if err != nil { + if _, err = tx.StmtContext(ctx, si.updateEventsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch-1)); err != nil { return xerrors.Errorf("failed to mark events as reverted: %w", err) } - // Now apply all missing tipsets in reverse order i,e, we apply tipsets in [last matching tipset b/w index and canonical chain, - // current chain head] - for i := len(tipsetStack) - 1; i >= 0; i-- { - curTs := tipsetStack[i] - parentTs, err := si.cs.GetTipSetFromKey(ctx, curTs.Parents()) - if err != nil { - return xerrors.Errorf("failed to get parent tipset: %w", err) + totalIndexed := 0 + // apply all missing tipsets from the canonical chain to the current chain head + for i := 0; i < len(missingTipsets); i++ { + currTs := missingTipsets[i] + var parentTs *types.TipSet + var err error + + if i < len(missingTipsets)-1 { + parentTs = missingTipsets[i+1] + } else { + parentTs, err = si.cs.GetTipSetFromKey(ctx, currTs.Parents()) + if err != nil { + return xerrors.Errorf("failed to get parent tipset: %w", err) + } } - if err := si.indexTipset(ctx, tx, curTs, parentTs, true); err != nil { - return xerrors.Errorf("failed to index tipset: %w", err) + + if err := si.indexTipsetWithParentEvents(ctx, tx, parentTs, currTs); err != nil { + log.Warnf("failed to index tipset with parent events during reconciliation: %s", err) + // the above could have failed because of missing messages for `parentTs` in the chainstore + // so try to index only the currentTs and then halt the reconciliation process as we've + // reached the end of what we have in the chainstore + if err := si.indexTipset(ctx, tx, currTs); err != nil { + log.Warnf("failed to index tipset during reconciliation: %s", err) + } + break } + totalIndexed++ } + log.Infof("Indexed %d missing tipsets during reconciliation", totalIndexed) + return nil }) } @@ -403,7 +433,7 @@ func (si *SqliteIndexer) 
Apply(ctx context.Context, from, to *types.TipSet) erro // We're moving the chain ahead from the `from` tipset to the `to` tipset // Height(to) > Height(from) err := withTx(ctx, si.db, func(tx *sql.Tx) error { - if err := si.indexTipset(ctx, tx, to, from, true); err != nil { + if err := si.indexTipsetWithParentEvents(ctx, tx, from, to); err != nil { return xerrors.Errorf("error indexing tipset: %w", err) } @@ -463,20 +493,14 @@ func (si *SqliteIndexer) Revert(ctx context.Context, from, to *types.TipSet) err return nil } -func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types.TipSet, parentTs *types.TipSet, indexEvents bool) error { +func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error { tsKeyCidBytes, err := toTipsetKeyCidBytes(ts) if err != nil { - return xerrors.Errorf("error computing tipset cid: %w", err) + return xerrors.Errorf("failed to compute tipset cid: %w", err) } - parentsKeyCid, err := parentTs.Key().Cid() - if err != nil { - return xerrors.Errorf("error computing tipset parents cid: %w", err) - } - parentsKeyCidBytes := parentsKeyCid.Bytes() - - if restored, err := si.restoreTipsetIfExists(ctx, tx, tsKeyCidBytes, parentsKeyCidBytes); err != nil { - return xerrors.Errorf("error restoring tipset: %w", err) + if restored, err := si.restoreTipsetIfExists(ctx, tx, tsKeyCidBytes); err != nil { + return xerrors.Errorf("failed to restore tipset: %w", err) } else if restored { return nil } @@ -486,27 +510,21 @@ func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types. msgs, err := si.cs.MessagesForTipset(ctx, ts) if err != nil { - return xerrors.Errorf("error getting messages for tipset: %w", err) + return xerrors.Errorf("failed to get messages for tipset: %w", err) } if len(msgs) == 0 { // If there are no messages, just insert the tipset and return if _, err := insertTipsetMsgStmt.ExecContext(ctx, tsKeyCidBytes, height, 0, nil, -1); err != nil { - return xerrors.Errorf("error inserting empty tipset: %w", err) + return xerrors.Errorf("failed to insert empty tipset: %w", err) } - - // we still need to index events for the parent tipset - if err := si.indexEvents(ctx, tx, parentTs, ts); err != nil { - return xerrors.Errorf("error indexing events: %w", err) - } - return nil } for i, msg := range msgs { msg := msg if _, err := insertTipsetMsgStmt.ExecContext(ctx, tsKeyCidBytes, height, 0, msg.Cid().Bytes(), i); err != nil { - return xerrors.Errorf("error inserting tipset message: %w", err) + return xerrors.Errorf("failed to insert tipset message: %w", err) } } @@ -514,7 +532,7 @@ func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types. blk := blk _, smsgs, err := si.cs.MessagesForBlock(ctx, blk) if err != nil { - return err + return xerrors.Errorf("failed to get messages for block: %w", err) } for _, smsg := range smsgs { @@ -523,39 +541,43 @@ func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types. continue } if err := si.indexSignedMessage(ctx, tx, smsg); err != nil { - return xerrors.Errorf("error indexing eth tx hash: %w", err) + return xerrors.Errorf("failed to index eth tx hash: %w", err) } } } - if !indexEvents { - return nil + return nil +} + +func (si *SqliteIndexer) indexTipsetWithParentEvents(ctx context.Context, tx *sql.Tx, parentTs *types.TipSet, currentTs *types.TipSet) error { + // Index the parent tipset if it doesn't exist yet. 
+ // This is necessary to properly index events produced by executing + // messages included in the parent tipset by the current tipset (deferred execution). + if err := si.indexTipset(ctx, tx, parentTs); err != nil { + return xerrors.Errorf("failed to index parent tipset: %w", err) + } + if err := si.indexTipset(ctx, tx, currentTs); err != nil { + return xerrors.Errorf("failed to index tipset: %w", err) } - // index events - if err := si.indexEvents(ctx, tx, parentTs, ts); err != nil { - return xerrors.Errorf("error indexing events: %w", err) + // Now Index events + if err := si.indexEvents(ctx, tx, parentTs, currentTs); err != nil { + return xerrors.Errorf("failed to index events: %w", err) } return nil } -func (si *SqliteIndexer) restoreTipsetIfExists(ctx context.Context, tx *sql.Tx, tsKeyCidBytes []byte, parentsKeyCidBytes []byte) (bool, error) { +func (si *SqliteIndexer) restoreTipsetIfExists(ctx context.Context, tx *sql.Tx, tsKeyCidBytes []byte) (bool, error) { // Check if the tipset already exists var exists bool if err := tx.Stmt(si.hasTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { - return false, xerrors.Errorf("error checking if tipset exists: %w", err) + return false, xerrors.Errorf("failed to check if tipset exists: %w", err) } if exists { if _, err := tx.Stmt(si.updateTipsetToNonRevertedStmt).ExecContext(ctx, tsKeyCidBytes); err != nil { - return false, xerrors.Errorf("error restoring tipset: %w", err) + return false, xerrors.Errorf("failed to restore tipset: %w", err) } - - // also mark all the events in the parent as not reverted - if _, err := tx.Stmt(si.updateEventsToNonRevertedStmt).ExecContext(ctx, parentsKeyCidBytes); err != nil { - return false, xerrors.Errorf("error unreverting events: %w", err) - } - return true, nil } return false, nil diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 49eff4197ba..987f27af050 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -294,15 +294,40 @@ [ChainIndexer] + # DisableChainIndexer disables the chain indexer which indexes tipsets, messages and events from chain state. + # Ideally, this should always be set to false as the Indexer is a crucial component for faster Lotus RPC responses. + # Only turn it off if you know what you are doing. + # + # type: bool # env var: LOTUS_CHAININDEXER_DISABLECHAININDEXER #DisableChainIndexer = false + # GCRetentionDays defines the number of days for which data is retained in the Indexer. + # During the garbage collection (GC) process, data older than this retention period is pruned. + # A value of 0 disables GC, retaining all historical data. + # Default is 0 i.e. GC is disabled by default. + # + # type: int64 # env var: LOTUS_CHAININDEXER_GCRETENTIONDAYS #GCRetentionDays = 0 + # ReconcileEmptyIndex reconciles the index with the chain state even if the Index is empty. + # This is useful when the indexer is not running for a long time and the chain has progressed. + # This will cause the indexer to re-index the entire chain state available on the node. + # Defaults to false. + # + # type: bool # env var: LOTUS_CHAININDEXER_RECONCILEEMPTYINDEX #ReconcileEmptyIndex = false + # MaxReconcileTipsets limits the number of tipsets to reconcile with the chain. + # This is useful to limit the amount of disk space used by the indexer. + # Defaults to 3 * epochsPerDay i.e. 3 days of chain history. 
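+ # For reference: with mainnet's 30-second epochs there are 2880 epochs per day,
+ # so the default works out to 3 * 2880 = 8640 tipsets, which is the value shown below.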
+ # + # type: int + # env var: LOTUS_CHAININDEXER_MAXRECONCILETIPSETS + #MaxReconcileTipsets = 8640 + [FaultReporter] # EnableConsensusFaultReporter controls whether the node will monitor and diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go index 246240a2620..41e3e2a4435 100644 --- a/itests/kit/node_opts.go +++ b/itests/kit/node_opts.go @@ -67,6 +67,8 @@ var DefaultNodeOpts = nodeOpts{ cfg.Fevm.EnableEthRPC = true cfg.Events.MaxFilterHeightRange = math.MaxInt64 cfg.Events.EnableActorEventsAPI = true + cfg.ChainIndexer.ReconcileEmptyIndex = true + cfg.ChainIndexer.MaxReconcileTipsets = 10000 return nil }, }, diff --git a/node/config/def.go b/node/config/def.go index d8c1d4ab3a6..b7cd995d009 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -99,6 +99,7 @@ func DefaultFullNode() *FullNode { DisableChainIndexer: false, GCRetentionDays: 0, ReconcileEmptyIndex: false, + MaxReconcileTipsets: 3 * builtin.EpochsInDay, }, } } diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index aded56f986b..b7addc3cda4 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -71,6 +71,42 @@ your node if metadata log is disabled`, Comment: ``, }, }, + "ChainIndexerConfig": { + { + Name: "DisableChainIndexer", + Type: "bool", + + Comment: `DisableChainIndexer disables the chain indexer which indexes tipsets, messages and events from chain state. +Ideally, this should always be set to false as the Indexer is a crucial component for faster Lotus RPC responses. +Only turn it off if you know what you are doing.`, + }, + { + Name: "GCRetentionDays", + Type: "int64", + + Comment: `GCRetentionDays defines the number of days for which data is retained in the Indexer. +During the garbage collection (GC) process, data older than this retention period is pruned. +A value of 0 disables GC, retaining all historical data. +Default is 0 i.e. GC is disabled by default.`, + }, + { + Name: "ReconcileEmptyIndex", + Type: "bool", + + Comment: `ReconcileEmptyIndex reconciles the index with the chain state even if the Index is empty. +This is useful when the indexer is not running for a long time and the chain has progressed. +This will cause the indexer to re-index the entire chain state available on the node. +Defaults to false.`, + }, + { + Name: "MaxReconcileTipsets", + Type: "int", + + Comment: `MaxReconcileTipsets limits the number of tipsets to reconcile with the chain. +This is useful to limit the amount of disk space used by the indexer. +Defaults to 3 * epochsPerDay i.e. 3 days of chain history.`, + }, + }, "Chainstore": { { Name: "EnableSplitstore", @@ -273,8 +309,8 @@ Note: Setting this value to 0 disables the cache.`, Comment: ``, }, { - Name: "Index", - Type: "IndexConfig", + Name: "ChainIndexer", + Type: "ChainIndexerConfig", Comment: ``, }, @@ -318,25 +354,6 @@ in a cluster. Only 1 is required`, Comment: `The port to find Yugabyte. Blank for default.`, }, }, - "IndexConfig": { - { - Name: "DisableChainIndexer", - Type: "bool", - - Comment: `DisableChainIndexer disables the chain indexer which indexes tipsets, messages and events from chain state. -Ideally, this should always be set to false as the Indexer is a crucial component for faster Lotus RPC responses. -Only turn it off if you know what you are doing.`, - }, - { - Name: "GCRetentionDays", - Type: "int64", - - Comment: `GCRetentionDays defines the number of days for which data is retained in the Indexer. -During the garbage collection (GC) process, data older than this retention period is pruned. 
-A value of 0 disables GC, retaining all historical data. -Default is 0 i.e. GC is disabled by default.`, - }, - }, "JournalConfig": { { Name: "DisabledEvents", diff --git a/node/config/types.go b/node/config/types.go index 804a2617cb1..cec12a288d7 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -630,6 +630,11 @@ type ChainIndexerConfig struct { // This will cause the indexer to re-index the entire chain state available on the node. // Defaults to false. ReconcileEmptyIndex bool + + // MaxReconcileTipsets limits the number of tipsets to reconcile with the chain. + // This is useful to limit the amount of disk space used by the indexer. + // Defaults to 3 * epochsPerDay i.e. 3 days of chain history. + MaxReconcileTipsets int } type HarmonyDB struct { diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index e4519c6bece..ed5f0625058 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -34,7 +34,7 @@ func ChainIndexer(cfg config.ChainIndexerConfig) func(lc fx.Lifecycle, mctx help // TODO Implement config driven auto-backfilling chainIndexer, err := chainindex.NewSqliteIndexer(filepath.Join(sqlitePath, chainindex.DefaultDbFilename), - cs, cfg.GCRetentionDays, cfg.ReconcileEmptyIndex) + cs, cfg.GCRetentionDays, cfg.ReconcileEmptyIndex, cfg.MaxReconcileTipsets) if err != nil { return nil, err } From 994a717ca1987e9dc2bfcb4835f9f39c75c0a77f Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 6 Sep 2024 14:44:20 +0400 Subject: [PATCH 20/66] fix tests --- chainindex/indexer.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/chainindex/indexer.go b/chainindex/indexer.go index b89dc54196e..bbb611adb30 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -227,11 +227,16 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip if i < len(missingTipsets)-1 { parentTs = missingTipsets[i+1] - } else { + } else if currTs.Height() > 0 { parentTs, err = si.cs.GetTipSetFromKey(ctx, currTs.Parents()) if err != nil { return xerrors.Errorf("failed to get parent tipset: %w", err) } + } else if currTs.Height() == 0 { + if err := si.indexTipset(ctx, tx, currTs); err != nil { + log.Warnf("failed to index tipset during reconciliation: %s", err) + } + break } if err := si.indexTipsetWithParentEvents(ctx, tx, parentTs, currTs); err != nil { From ad9bcb14fd97413a73d3087d3bf63283a41bdb4e Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 6 Sep 2024 16:21:29 +0400 Subject: [PATCH 21/66] improve config and docs --- node/builder_chain.go | 2 +- node/config/def.go | 2 +- node/config/types.go | 53 ++++++++++++++++++++++++++------------ node/modules/chainindex.go | 2 +- 4 files changed, 40 insertions(+), 19 deletions(-) diff --git a/node/builder_chain.go b/node/builder_chain.go index a6e80ff7fa1..6f2ed0f7701 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -283,7 +283,7 @@ func ConfigFullNode(c interface{}) Option { ), Override(new(chainindex.Indexer), modules.ChainIndexer(cfg.ChainIndexer)), - If(!cfg.ChainIndexer.DisableChainIndexer, + If(!cfg.ChainIndexer.DisableIndexer, Override(InitChainIndexerKey, modules.InitChainIndexer), ), ) diff --git a/node/config/def.go b/node/config/def.go index b7cd995d009..590a5e08274 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -96,7 +96,7 @@ func DefaultFullNode() *FullNode { MaxFilterHeightRange: 2880, // conservative limit of one day }, ChainIndexer: ChainIndexerConfig{ - DisableChainIndexer: false, + DisableIndexer: false, 
GCRetentionDays: 0, ReconcileEmptyIndex: false, MaxReconcileTipsets: 3 * builtin.EpochsInDay, diff --git a/node/config/types.go b/node/config/types.go index cec12a288d7..cfabdb7e336 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -614,26 +614,47 @@ type EventsConfig struct { } type ChainIndexerConfig struct { - // DisableChainIndexer disables the chain indexer which indexes tipsets, messages and events from chain state. - // Ideally, this should always be set to false as the Indexer is a crucial component for faster Lotus RPC responses. - // Only turn it off if you know what you are doing. - DisableChainIndexer bool - - // GCRetentionDays defines the number of days for which data is retained in the Indexer. - // During the garbage collection (GC) process, data older than this retention period is pruned. - // A value of 0 disables GC, retaining all historical data. - // Default is 0 i.e. GC is disabled by default. + // DisableIndexer controls whether the chain indexer is active. + // The chain indexer is responsible for indexing tipsets, messages, and events from the chain state. + // It is a crucial component for optimizing Lotus RPC response times. + // + // Default: false (indexer is enabled) + // + // Setting this to true will disable the indexer, which may significantly impact RPC performance. + // It is strongly recommended to keep this set to false unless you have a specific reason to disable it + // and fully understand the implications. + DisableIndexer bool + + // GCRetentionDays specifies the duration (in days) for which data is retained in the Indexer. + // The garbage collection (GC) process removes data older than this retention period. + // Setting this to 0 disables GC, preserving all historical data indefinitely. + // + // Default: 0 (GC disabled) GCRetentionDays int64 - // ReconcileEmptyIndex reconciles the index with the chain state even if the Index is empty. - // This is useful when the indexer is not running for a long time and the chain has progressed. - // This will cause the indexer to re-index the entire chain state available on the node. - // Defaults to false. + // ReconcileEmptyIndex determines whether to reconcile the index with the chain state + // during startup when the index is empty. + // + // When set to true: + // - On startup, if the index is empty, the indexer will index the available + // chain state on the node albeit within the `MaxReconcileTipsets` limit. + // + // When set to false: + // - The indexer will not automatically re-index the chain state on startup if the index is empty. + // + // Default: false + // + // Note: The number of tipsets reconciled (i.e. indexed) during this process can be + // controlled using the `MaxReconcileTipsets` option. ReconcileEmptyIndex bool - // MaxReconcileTipsets limits the number of tipsets to reconcile with the chain. - // This is useful to limit the amount of disk space used by the indexer. - // Defaults to 3 * epochsPerDay i.e. 3 days of chain history. + // MaxReconcileTipsets limits the number of tipsets to reconcile with the chain during startup. + // It represents the maximum number of tipsets to index from the chain state that are absent in the index. + // + // Default: 3 * epochsPerDay (approximately 3 days of chain history) + // + // Note: Setting this value too low may result in incomplete indexing, while setting it too high + // may increase startup time. 
MaxReconcileTipsets int } diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index ed5f0625058..7f03b191c4e 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -23,7 +23,7 @@ import ( func ChainIndexer(cfg config.ChainIndexerConfig) func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (chainindex.Indexer, error) { return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (chainindex.Indexer, error) { - if cfg.DisableChainIndexer { + if cfg.DisableIndexer { return nil, nil } From 0c9a0caa636d0ee903edc9de66540da825b017cc Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 6 Sep 2024 16:51:44 +0400 Subject: [PATCH 22/66] improve docs and error handling --- chain/events/observer.go | 33 +++++++++++++++++++++++++-------- node/impl/full/eth.go | 27 +++++++++++++-------------- node/modules/chainindex.go | 10 ++++++++-- 3 files changed, 46 insertions(+), 24 deletions(-) diff --git a/chain/events/observer.go b/chain/events/observer.go index 56dc1dfd897..5401ff96cc6 100644 --- a/chain/events/observer.go +++ b/chain/events/observer.go @@ -244,22 +244,39 @@ func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) err return nil } -// ObserveAndBlock registers the observer and returns the current tipset along with a handle function. -// The observer is guaranteed to observe events starting at this tipset. -// The returned handle function should be called by the client when it's ready to receive updates. +// ObserveAndBlock registers the observer and returns the current tipset along with an unlock function. // -// This function should only be called by the client after the observer has been started. -// Note that the Observer will block all clients from receiving tipset updates until the handle is called. -func (o *observer) ObserveAndBlock(obs TipSetObserver) (*types.TipSet, func()) { +// This method guarantees that the observer will receive tipset updates starting from the returned tipset. +// It blocks all tipset updates for all clients until the returned unlock function is called. +// +// The typical usage pattern is: +// 1. Call ObserveAndBlock to register the observer +// 2. Perform any necessary initialization using the returned current tipset +// 3. Call the unlock function to start receiving updates +// +// Important notes: +// - This method should only be called after the observer has been started +// - The unlock function must be called to prevent blocking of tipset updates for all registered observers +// - This method returns an error if the observer hasn't started yet +// +// Returns: +// - *types.TipSet: The current tipset at the time of registration +// - func(): An unlock function that must be called to start receiving updates +// - error: An error if the observer hasn't started yet +func (o *observer) ObserveAndBlock(obs TipSetObserver) (*types.TipSet, func(), error) { o.lk.Lock() - o.observers = append(o.observers, obs) currentHead := o.head + if currentHead == nil { + o.lk.Unlock() + return nil, func() {}, xerrors.New("observer not started") + } + o.observers = append(o.observers, obs) unlockHandle := func() { o.lk.Unlock() } - return currentHead, unlockHandle + return currentHead, unlockHandle, nil } // Observe registers the observer, and returns the current tipset. 
The observer is guaranteed to diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index 0591ed2ad98..cd4fa9ce8a4 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -360,7 +360,8 @@ func (a *EthModule) EthGetTransactionByHashLimited(ctx context.Context, txHash * if err != nil && errors.Is(err, chainindex.ErrNotFound) { log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) } else if err != nil { - return nil, xerrors.Errorf("database error: %w", err) + log.Errorf("failed to lookup transaction hash %s in chain indexer: %s", txHash.String(), err) + return nil, xerrors.Errorf("failed to lookup transaction hash %s in chain indexer: %w", txHash.String(), err) } } @@ -425,15 +426,13 @@ func (a *EthModule) EthGetMessageCidByTransactionHash(ctx context.Context, txHas if err != nil && errors.Is(err, chainindex.ErrNotFound) { log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) } else if err != nil { - return nil, xerrors.Errorf("database error: %w", err) + log.Errorf("failed to lookup transaction hash %s in chain indexer: %s", txHash.String(), err) + return nil, xerrors.Errorf("failed to lookup transaction hash %s in chain indexer: %w", txHash.String(), err) } } - // We fall out of the first condition and continue if errors.Is(err, chainindex.ErrNotFound) { log.Debug("could not find transaction hash %s in lookup table", txHash.String()) - } else if err != nil { - return nil, xerrors.Errorf("database error: %w", err) } else if a.ChainIndexer != nil { return &c, nil } @@ -520,7 +519,8 @@ func (a *EthModule) EthGetTransactionReceiptLimited(ctx context.Context, txHash if err != nil && errors.Is(err, chainindex.ErrNotFound) { log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) } else if err != nil { - return nil, xerrors.Errorf("database error: %w", err) + log.Errorf("failed to lookup transaction hash %s in chain indexer: %s", txHash.String(), err) + return nil, xerrors.Errorf("failed to lookup transaction hash %s in chain indexer: %w", txHash.String(), err) } } @@ -1575,19 +1575,18 @@ func (e *EthEventHandler) getEthLogsForBlockAndTransaction(ctx context.Context, func (e *EthEventHandler) EthGetLogs(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) { ces, err := e.ethGetEventsForFilter(ctx, filterSpec) if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to get events for filter: %w", err) } return ethFilterResultFromEvents(ctx, ces, e.SubManager.StateAPI) } -func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, - filterSpec *ethtypes.EthFilterSpec) ([]*chainindex.CollectedEvent, error) { +func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) ([]*chainindex.CollectedEvent, error) { if e.EventFilterManager == nil { return nil, api.ErrNotSupported } if e.EventFilterManager.ChainIndexer == nil { - return nil, xerrors.Errorf("cannot use eth_get_logs if historical event index is disabled") + return nil, xerrors.Errorf("cannot use `eth_get_logs` if chain indexer is disabled") } pf, err := e.parseEthFilterSpec(filterSpec) @@ -1603,12 +1602,12 @@ func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, return nil, xerrors.Errorf("failed to get tipset by cid: %w", err) } if ts.Height() >= head.Height() { - return nil, xerrors.Errorf("cannot ask for events for a tipset >= head") + return nil, xerrors.New("cannot ask for events for a tipset >= head") } } - if pf.maxHeight 
>= head.Height() { - return nil, xerrors.Errorf("cannot ask for events for a tipset >= head") + if pf.minHeight >= head.Height() || pf.maxHeight >= head.Height() { + return nil, xerrors.New("cannot ask for events for a tipset >= head") } ef := &chainindex.EventFilter{ @@ -1622,7 +1621,7 @@ func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, ces, err := e.EventFilterManager.ChainIndexer.GetEventsForFilter(ctx, ef, true) if err != nil { - return nil, xerrors.Errorf("failed to get events for filter: %w", err) + return nil, xerrors.Errorf("failed to get events for filter from chain indexer: %w", err) } return ces, nil diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index 7f03b191c4e..2b61f251c77 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -76,8 +76,14 @@ func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer chainind } // Tipset listener - tipset, unlockObserver := ev.ObserveAndBlock(indexer) - if err := indexer.ReconcileWithChain(ctx, tipset); err != nil { + + // `ObserveAndBlock` returns the current head and guarantees that it will call the observer with all future tipsets + head, unlockObserver, err := ev.ObserveAndBlock(indexer) + if err != nil { + return xerrors.Errorf("error while observing tipsets: %w", err) + } + if err := indexer.ReconcileWithChain(ctx, head); err != nil { + unlockObserver() return xerrors.Errorf("error while reconciling chain index with chain state: %w", err) } unlockObserver() From 89cedb2cd152940b155a65e660e8773a346ca62f Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 6 Sep 2024 17:47:56 +0400 Subject: [PATCH 23/66] improve read logic --- chainindex/indexer.go | 12 ++++++++++++ chainindex/read.go | 42 ++++++++++++++++++++++++------------------ 2 files changed, 36 insertions(+), 18 deletions(-) diff --git a/chainindex/indexer.go b/chainindex/indexer.go index bbb611adb30..a7ba799955b 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -285,46 +285,57 @@ func (si *SqliteIndexer) prepareStatements() error { if err != nil { return xerrors.Errorf("prepare %s: %w", "insertEthTxHashStmt", err) } + si.getNonRevertedMsgInfoStmt, err = si.db.Prepare(stmtGetNonRevertedMessageInfo) if err != nil { return xerrors.Errorf("prepare %s: %w", "getNonRevertedMsgInfoStmt", err) } + si.getMsgCidFromEthHashStmt, err = si.db.Prepare(stmtGetMsgCidFromEthHash) if err != nil { return xerrors.Errorf("prepare %s: %w", "getMsgCidFromEthHashStmt", err) } + si.insertTipsetMessageStmt, err = si.db.Prepare(stmtInsertTipsetMessage) if err != nil { return xerrors.Errorf("prepare %s: %w", "insertTipsetMessageStmt", err) } + si.hasTipsetStmt, err = si.db.Prepare(stmtHasTipset) if err != nil { return xerrors.Errorf("prepare %s: %w", "hasTipsetStmt", err) } + si.updateTipsetToNonRevertedStmt, err = si.db.Prepare(stmtUpdateTipsetToNonReverted) if err != nil { return xerrors.Errorf("prepare %s: %w", "updateTipsetToNonRevertedStmt", err) } + si.updateTipsetToRevertedStmt, err = si.db.Prepare(stmtUpdateTipsetToReverted) if err != nil { return xerrors.Errorf("prepare %s: %w", "updateTipsetToRevertedStmt", err) } + si.getMaxNonRevertedTipsetStmt, err = si.db.Prepare(stmtGetMaxNonRevertedTipset) if err != nil { return xerrors.Errorf("prepare %s: %w", "getMaxNonRevertedTipsetStmt", err) } + si.removeRevertedTipsetsBeforeHeightStmt, err = si.db.Prepare(stmtRemoveRevertedTipsetsBeforeHeight) if err != nil { return xerrors.Errorf("prepare %s: %w", "removeRevertedTipsetsBeforeHeightStmt", err) } + 
si.removeTipsetsBeforeHeightStmt, err = si.db.Prepare(stmtRemoveTipsetsBeforeHeight) if err != nil { return xerrors.Errorf("prepare %s: %w", "removeTipsetsBeforeHeightStmt", err) } + si.removeEthHashesOlderThanStmt, err = si.db.Prepare(stmtRemoveEthHashesOlderThan) if err != nil { return xerrors.Errorf("prepare %s: %w", "removeEthHashesOlderThanStmt", err) } + si.updateTipsetsToRevertedFromHeightStmt, err = si.db.Prepare(stmtUpdateTipsetsToRevertedFromHeight) if err != nil { return xerrors.Errorf("prepare %s: %w", "updateTipsetsToRevertedFromHeightStmt", err) @@ -369,6 +380,7 @@ func (si *SqliteIndexer) prepareStatements() error { if err != nil { return xerrors.Errorf("prepare %s: %w", "insertEventStmt", err) } + si.insertEventEntryStmt, err = si.db.Prepare(stmtInsertEventEntry) if err != nil { return xerrors.Errorf("prepare %s: %w", "insertEventEntryStmt", err) diff --git a/chainindex/read.go b/chainindex/read.go index d512ddace56..660551683a7 100644 --- a/chainindex/read.go +++ b/chainindex/read.go @@ -77,10 +77,10 @@ func (si *SqliteIndexer) GetMsgInfo(ctx context.Context, messageCid cid.Cid) (*M func (si *SqliteIndexer) readWithHeadIndexWait(ctx context.Context, readFunc func() error) error { err := readFunc() if err == sql.ErrNoRows { + // not found, but may be in latest head, so wait for it and check again if err := si.waitTillHeadIndexed(ctx); err != nil { - return xerrors.Errorf("error waiting for head to be indexed: %w", err) + return xerrors.Errorf("failed while waiting for head to be indexed: %w", err) } - // not found, but may be in latest head, so wait for it and check again err = readFunc() } @@ -98,15 +98,6 @@ func (si *SqliteIndexer) queryMsgInfo(ctx context.Context, messageCid cid.Cid, t return si.getNonRevertedMsgInfoStmt.QueryRowContext(ctx, messageCid.Bytes()).Scan(tipsetKeyCidBytes, height) } -func (si *SqliteIndexer) isTipsetIndexed(ctx context.Context, tsKeyCid []byte) (bool, error) { - var exists bool - err := si.hasTipsetStmt.QueryRowContext(ctx, tsKeyCid).Scan(&exists) - if err != nil { - return false, xerrors.Errorf("error checking if tipset exists: %w", err) - } - return exists, nil -} - func (si *SqliteIndexer) waitTillHeadIndexed(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, headIndexedWaitTimeout) defer cancel() @@ -114,12 +105,12 @@ func (si *SqliteIndexer) waitTillHeadIndexed(ctx context.Context) error { head := si.cs.GetHeaviestTipSet() headTsKeyCidBytes, err := toTipsetKeyCidBytes(head) if err != nil { - return xerrors.Errorf("error getting tipset key cid: %w", err) + return xerrors.Errorf("failed to get tipset key cid: %w", err) } // is it already indexed? 
- if exists, err := si.isTipsetIndexed(ctx, headTsKeyCidBytes); err != nil { - return xerrors.Errorf("error checking if tipset exists: %w", err) + if exists, err := si.hasNonRevertedTipset(ctx, headTsKeyCidBytes); err != nil { + return xerrors.Errorf("failed to check if tipset exists: %w", err) } else if exists { return nil } @@ -129,11 +120,10 @@ func (si *SqliteIndexer) waitTillHeadIndexed(ctx context.Context) error { defer unsubFn() for ctx.Err() == nil { - exists, err := si.isTipsetIndexed(ctx, headTsKeyCidBytes) + exists, err := si.hasNonRevertedTipset(ctx, headTsKeyCidBytes) if err != nil { - return xerrors.Errorf("error checking if tipset exists: %w", err) - } - if exists { + return xerrors.Errorf("failed to check if tipset exists: %w", err) + } else if exists { return nil } @@ -146,3 +136,19 @@ func (si *SqliteIndexer) waitTillHeadIndexed(ctx context.Context) error { } return ctx.Err() } + +func (si *SqliteIndexer) hasNonRevertedTipset(ctx context.Context, tsKeyCidBytes []byte) (bool, error) { + var exists bool + if err := si.hasNonRevertedTipsetStmt.QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { + return false, xerrors.Errorf("failed to check if tipset is indexed and non-reverted: %w", err) + } + return exists, nil +} + +func (si *SqliteIndexer) isTipsetIndexed(ctx context.Context, tsKeyCidBytes []byte) (bool, error) { + var exists bool + if err := si.hasTipsetStmt.QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { + return false, xerrors.Errorf("failed to check if tipset is indexed: %w", err) + } + return exists, nil +} From a2a2b764fa2b973857b85772931844903fb9e276 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 6 Sep 2024 18:21:15 +0400 Subject: [PATCH 24/66] improve docs --- chainindex/events.go | 24 +++++++++++------------- chainindex/gc.go | 2 +- chainindex/helpers.go | 12 +++++++----- chainindex/pub_sub.go | 2 +- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/chainindex/events.go b/chainindex/events.go index 5ef8552a22b..3fb2eb5c8ce 100644 --- a/chainindex/events.go +++ b/chainindex/events.go @@ -35,17 +35,17 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ // this makes event inserts idempotent msgTsKeyCidBytes, err := toTipsetKeyCidBytes(msgTs) if err != nil { - return xerrors.Errorf("error getting tipset key cid: %w", err) + return xerrors.Errorf("failed to get tipset key cid: %w", err) } // if we've already indexed events for this tipset, mark them as unreverted and return res, err := tx.Stmt(si.updateEventsToNonRevertedStmt).ExecContext(ctx, msgTsKeyCidBytes) if err != nil { - return xerrors.Errorf("error unreverting events for tipset: %w", err) + return xerrors.Errorf("failed to unrevert events for tipset: %w", err) } rows, err := res.RowsAffected() if err != nil { - return xerrors.Errorf("error unreverting events for tipset: %w", err) + return xerrors.Errorf("failed to get rows affected by unreverting events for tipset: %w", err) } if rows > 0 { return nil @@ -53,7 +53,7 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ ems, err := si.loadExecutedMessages(ctx, msgTs, executionTs) if err != nil { - return xerrors.Errorf("error loading executed messages: %w", err) + return xerrors.Errorf("failed to load executed messages: %w", err) } eventCount := 0 addressLookups := make(map[abi.ActorID]address.Address) @@ -64,7 +64,7 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ // read message id for this message cid and 
tipset key cid var messageID int64 if err := tx.Stmt(si.getMsgIdForMsgCidAndTipsetStmt).QueryRow(msgCidBytes, msgTsKeyCidBytes).Scan(&messageID); err != nil { - return xerrors.Errorf("error getting message id for message cid and tipset key cid: %w", err) + return xerrors.Errorf("failed to get message id for message cid and tipset key cid: %w", err) } if messageID == 0 { return xerrors.Errorf("message id not found for message cid %s and tipset key cid %s", em.msg.Cid(), msgTs.Key()) @@ -86,13 +86,13 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ // Insert event into events table eventResult, err := tx.Stmt(si.insertEventStmt).Exec(messageID, eventCount, addr.Bytes(), 0) if err != nil { - return xerrors.Errorf("error inserting event: %w", err) + return xerrors.Errorf("failed to insert event: %w", err) } // Get the event_id of the inserted event eventID, err := eventResult.LastInsertId() if err != nil { - return xerrors.Errorf("error getting last insert id for event: %w", err) + return xerrors.Errorf("failed to get last insert id for event: %w", err) } // Insert event entries @@ -106,7 +106,7 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ entry.Value, ) if err != nil { - return xerrors.Errorf("error inserting event entry: %w", err) + return xerrors.Errorf("failed to insert event entry: %w", err) } } eventCount++ @@ -221,7 +221,6 @@ func (si *SqliteIndexer) checkTipsetIndexedStatus(ctx context.Context, f *EventF } // getTipsetKeyCidByHeight retrieves the tipset key CID for a given height. -// It returns nil if no tipset is found at the exact height. func (si *SqliteIndexer) getTipsetKeyCidByHeight(ctx context.Context, height abi.ChainEpoch) ([]byte, error) { ts, err := si.cs.GetTipsetByHeight(ctx, height, nil, false) if err != nil { @@ -229,14 +228,13 @@ func (si *SqliteIndexer) getTipsetKeyCidByHeight(ctx context.Context, height abi } if ts.Height() != height { - return nil, nil // No tipset at exact height + return nil, ErrNotFound // No tipset at exact height } return toTipsetKeyCidBytes(ts) } // GetEventsForFilter returns matching events for the given filter -// prefillFilter fills a filter's collection of events from the historic index // Returns nil, nil if the filter has no matching events // Returns nil, ErrNotFound if the filter has no matching events and the tipset is not indexed // Returns nil, err for all other errors @@ -257,7 +255,7 @@ func (si *SqliteIndexer) GetEventsForFilter(ctx context.Context, f *EventFilter, if err == sql.ErrNoRows { // did not find events, but may be in head, so wait for it and check again if err := si.waitTillHeadIndexed(ctx); err != nil { - return nil, xerrors.Errorf("error waiting for head to be indexed: %w", err) + return nil, xerrors.Errorf("failed to wait for head to be indexed: %w", err) } q, err = stmt.QueryContext(ctx, values...) 
} @@ -387,7 +385,7 @@ func (si *SqliteIndexer) sanityCheckFilter(ctx context.Context, f *EventFilter) if f.TipsetCid != cid.Undef { ts, err := si.cs.GetTipSetByCid(ctx, f.TipsetCid) if err != nil { - return xerrors.Errorf("error getting tipset by cid: %w", err) + return xerrors.Errorf("failed to get tipset by cid: %w", err) } if ts.Height() >= head.Height() { return xerrors.New("cannot ask for events for a tipset >= head") diff --git a/chainindex/gc.go b/chainindex/gc.go index f444b8625f4..b9c1a7fad1d 100644 --- a/chainindex/gc.go +++ b/chainindex/gc.go @@ -89,7 +89,7 @@ func (si *SqliteIndexer) cleanupRevertedTipsets(ctx context.Context) { } // remove all entries from the `tipsets` table where `reverted=true` and height is < finalEpoch - // cacade delete based on foreign key constraints takes care of cleaning up the other tables + // cascade delete based on foreign key constraints takes care of cleaning up the other tables res, err := si.removeRevertedTipsetsBeforeHeightStmt.ExecContext(ctx, finalEpoch) if err != nil { log.Errorw("failed to remove reverted tipsets before height", "height", finalEpoch, "error", err) diff --git a/chainindex/helpers.go b/chainindex/helpers.go index 76dedf93589..174d4aac639 100644 --- a/chainindex/helpers.go +++ b/chainindex/helpers.go @@ -25,10 +25,12 @@ func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error } defer func() { if closeErr := si.Close(); closeErr != nil { - log.Errorf("failed to close sqlite indexer: %v", closeErr) + log.Errorf("failed to close sqlite indexer: %s", closeErr) } }() + totalIndexed := 0 + err = withTx(ctx, si.db, func(tx *sql.Tx) error { head := cs.GetHeaviestTipSet() startHeight := head.Height() @@ -40,6 +42,7 @@ func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error log.Infof("stopping import after %d tipsets with final error: %s", startHeight-curTs.Height(), err) break } + totalIndexed++ curTs, err = cs.GetTipSetFromKey(ctx, curTs.Parents()) if err != nil { @@ -53,12 +56,11 @@ func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error return xerrors.Errorf("failed to populate from snapshot: %w", err) } - log.Infof("Successfully populated chainindex from snapshot") + log.Infof("Successfully populated chainindex from snapshot with %d tipsets", totalIndexed) return nil } func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, indexer Indexer) { - for ctx.Err() == nil { select { case <-ctx.Done(): @@ -72,7 +74,7 @@ func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, indexer } err := indexer.IndexSignedMessage(ctx, u.Message) if err != nil { - log.Errorw("error indexing signed Mpool message", "error", err) + log.Errorw("failed to index signed Mpool message", "error", err) } } } @@ -82,7 +84,7 @@ func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, indexer func toTipsetKeyCidBytes(ts *types.TipSet) ([]byte, error) { tsKeyCid, err := ts.Key().Cid() if err != nil { - return nil, xerrors.Errorf("error getting tipset key cid: %w", err) + return nil, xerrors.Errorf("failed to get tipset key cid: %w", err) } return tsKeyCid.Bytes(), nil } diff --git a/chainindex/pub_sub.go b/chainindex/pub_sub.go index 15cb6a35858..b24b154e4b9 100644 --- a/chainindex/pub_sub.go +++ b/chainindex/pub_sub.go @@ -12,7 +12,7 @@ type updateSub struct { type chainIndexUpdated struct{} func (si *SqliteIndexer) subscribeUpdates() (chan chainIndexUpdated, func()) { - subCtx, subCancel := context.WithCancel(context.Background()) + 
subCtx, subCancel := context.WithCancel(si.ctx) ch := make(chan chainIndexUpdated) si.mu.Lock() From 44af9f824e6e822612d5d5757af5c0dacfbec7ca Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Mon, 9 Sep 2024 12:12:58 +0400 Subject: [PATCH 25/66] better logging and handle ennable event storage --- chain/events/observer.go | 4 ++-- chainindex/events.go | 5 +++++ chainindex/indexer.go | 11 +++++++++-- chainindex/interface.go | 1 + node/builder_chain.go | 2 +- node/config/def.go | 4 ++-- node/modules/chainindex.go | 1 + 7 files changed, 21 insertions(+), 7 deletions(-) diff --git a/chain/events/observer.go b/chain/events/observer.go index 5401ff96cc6..592585b04ed 100644 --- a/chain/events/observer.go +++ b/chain/events/observer.go @@ -206,7 +206,7 @@ func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) err for _, obs := range observers { if err := obs.Revert(ctx, from, to); err != nil { - log.Errorf("observer %T failed to apply tipset %s (%d) with: %s", obs, from.Key(), from.Height(), err) + log.Errorf("observer %T failed to revert tipset %s (%d) with: %s", obs, from.Key(), from.Height(), err) } } @@ -232,7 +232,7 @@ func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) err for _, obs := range observers { if err := obs.Apply(ctx, head, to); err != nil { - log.Errorf("observer %T failed to revert tipset %s (%d) with: %s", obs, to.Key(), to.Height(), err) + log.Errorf("observer %T failed to apply tipset %s (%d) with: %s", obs, to.Key(), to.Height(), err) } } if to.Height() > o.maxHeight { diff --git a/chainindex/events.go b/chainindex/events.go index 3fb2eb5c8ce..7e1859b4f6d 100644 --- a/chainindex/events.go +++ b/chainindex/events.go @@ -48,6 +48,11 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ return xerrors.Errorf("failed to get rows affected by unreverting events for tipset: %w", err) } if rows > 0 { + log.Infof("unreverted %d events for tipset: %s", rows, msgTs.Key()) + return nil + } + + if !si.cs.IsStoringEvents() { return nil } diff --git a/chainindex/indexer.go b/chainindex/indexer.go index a7ba799955b..89a4fe14907 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -145,6 +145,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip isIndexEmpty := !hasTipset if isIndexEmpty && !si.reconcileEmptyIndex { + log.Info("Chain index is empty and reconcileEmptyIndex is disabled; skipping reconciliation") return nil } @@ -162,7 +163,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip } currTs := head - log.Infof("Starting chain reconciliation from height %d", head.Height()) + log.Infof("Starting chain reconciliation from height %d, reconciliationEpoch: %d", head.Height(), reconciliationEpoch) var missingTipsets []*types.TipSet // The goal here is to walk the canonical chain backwards from head until we find a matching non-reverted tipset @@ -209,9 +210,15 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip // mark all tipsets from the reconciliation epoch onwards in the Index as reverted as they are not in the current canonical chain log.Infof("Marking tipsets as reverted from height %d", reconciliationEpoch) - if _, err = tx.StmtContext(ctx, si.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch)); err != nil { + result, err := tx.StmtContext(ctx, si.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch)) + if err != nil { return 
xerrors.Errorf("failed to mark tipsets as reverted: %w", err) } + rowsAffected, err := result.RowsAffected() + if err != nil { + return xerrors.Errorf("failed to get number of rows affected: %w", err) + } + log.Infof("Marked %d tipsets as reverted from height %d", rowsAffected, reconciliationEpoch) // also need to mark events as reverted for the corresponding inclusion tipsets if _, err = tx.StmtContext(ctx, si.updateEventsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch-1)); err != nil { diff --git a/chainindex/interface.go b/chainindex/interface.go index 013a3650402..75facd9f122 100644 --- a/chainindex/interface.go +++ b/chainindex/interface.go @@ -76,6 +76,7 @@ type ChainStore interface { MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) ActorStore(ctx context.Context) adt.Store GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, ts *types.TipSet, prev bool) (*types.TipSet, error) + IsStoringEvents() bool } var _ ChainStore = (*store.ChainStore)(nil) diff --git a/node/builder_chain.go b/node/builder_chain.go index 6f2ed0f7701..2f0be6d503f 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -238,7 +238,7 @@ func ConfigFullNode(c interface{}) Option { // If the Eth JSON-RPC is enabled, enable storing events at the ChainStore. // This is the case even if real-time and historic filtering are disabled, // as it enables us to serve logs in eth_getTransactionReceipt. - If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI, Override(StoreEventsKey, modules.EnableStoringEvents)), + If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI || !cfg.ChainIndexer.DisableIndexer, Override(StoreEventsKey, modules.EnableStoringEvents)), If(cfg.Wallet.RemoteBackend != "", Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), diff --git a/node/config/def.go b/node/config/def.go index 590a5e08274..79f1013631d 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -83,13 +83,13 @@ func DefaultFullNode() *FullNode { }, }, Fevm: FevmConfig{ - EnableEthRPC: false, + EnableEthRPC: true, EthTraceFilterMaxResults: 500, EthBlkCacheSize: 500, }, Events: EventsConfig{ DisableRealTimeFilterAPI: false, - EnableActorEventsAPI: false, + EnableActorEventsAPI: true, FilterTTL: Duration(time.Hour * 1), MaxFilters: 100, MaxFilterResults: 10000, diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index 2b61f251c77..5ab1d76b6a1 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -86,6 +86,7 @@ func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer chainind unlockObserver() return xerrors.Errorf("error while reconciling chain index with chain state: %w", err) } + log.Infof("Chain indexer reconciled with chain state; observer will start upates from height: %d", head.Height()) unlockObserver() ch, err := mp.Updates(ctx) From 6608b80c057de3958792d5aafc2038f0cfefa632 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Mon, 9 Sep 2024 12:40:45 +0400 Subject: [PATCH 26/66] improve logs and index init proc --- chainindex/helpers.go | 9 +++++---- chainindex/indexer.go | 7 +++++-- chainindex/interface.go | 1 + node/modules/chainindex.go | 5 ++++- 4 files changed, 15 insertions(+), 7 deletions(-) diff --git a/chainindex/helpers.go b/chainindex/helpers.go index 174d4aac639..fcc10de0e60 100644 --- a/chainindex/helpers.go +++ b/chainindex/helpers.go @@ -12,8 +12,10 @@ import ( ) func PopulateFromSnapshot(ctx context.Context, 
path string, cs ChainStore) error { + log.Infof("populating chainindex at path %s from snapshot", path) // Check if a database already exists and attempt to delete it if _, err := os.Stat(path); err == nil { + log.Infof("deleting existing chainindex at %s", path) if err = os.Remove(path); err != nil { return xerrors.Errorf("failed to delete existing chainindex at %s: %w", path, err) } @@ -33,13 +35,12 @@ func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error err = withTx(ctx, si.db, func(tx *sql.Tx) error { head := cs.GetHeaviestTipSet() - startHeight := head.Height() curTs := head - log.Infof("starting index hydration from snapshot at height %d", startHeight) + log.Infof("starting to populate chainindex from snapshot at head height %d", head.Height()) for curTs != nil { if err := si.indexTipset(ctx, tx, curTs); err != nil { - log.Infof("stopping import after %d tipsets with final error: %s", startHeight-curTs.Height(), err) + log.Infof("stopping chainindex population at height %d with final error: %s", curTs.Height(), err) break } totalIndexed++ @@ -53,7 +54,7 @@ func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error return nil }) if err != nil { - return xerrors.Errorf("failed to populate from snapshot: %w", err) + return xerrors.Errorf("failed to populate chainindex from snapshot: %w", err) } log.Infof("Successfully populated chainindex from snapshot with %d tipsets", totalIndexed) diff --git a/chainindex/indexer.go b/chainindex/indexer.go index 89a4fe14907..ca82449551d 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -102,10 +102,13 @@ func NewSqliteIndexer(path string, cs ChainStore, gcRetentionDays int64, reconci return nil, xerrors.Errorf("failed to prepare statements: %w", err) } + return si, nil +} + +func (si *SqliteIndexer) Start() error { si.wg.Add(1) go si.gcLoop() - - return si, nil + return nil } func (si *SqliteIndexer) SetIdToRobustAddrFunc(idToRobustAddrFunc IdToRobustAddrFunc) { diff --git a/chainindex/interface.go b/chainindex/interface.go index 75facd9f122..327349d4892 100644 --- a/chainindex/interface.go +++ b/chainindex/interface.go @@ -50,6 +50,7 @@ type EventFilter struct { } type Indexer interface { + Start() error ReconcileWithChain(ctx context.Context, currHead *types.TipSet) error IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, c cid.Cid) error diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index 5ab1d76b6a1..5a1609c8dc0 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -55,7 +55,6 @@ func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer chainind lc.Append(fx.Hook{ OnStart: func(_ context.Context) error { - indexer.SetIdToRobustAddrFunc(func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { idAddr, err := address.NewIDAddress(uint64(emitter)) if err != nil { @@ -70,6 +69,10 @@ func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer chainind return *actor.DelegatedAddress, true }) + if err := indexer.Start(); err != nil { + return err + } + ev, err := events.NewEvents(ctx, &evapi) if err != nil { return err From 625d8c86249bef1eddbbffa079142ab54a7c5dcc Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Mon, 9 Sep 2024 17:10:08 +0400 Subject: [PATCH 27/66] better logging --- chainindex/indexer.go | 17 +++++-- documentation/en/default-lotus-config.toml | 57 +++++++++++++++------- 
node/config/doc_gen.go | 51 +++++++++++++------ node/config/types.go | 4 +- node/impl/full/eth.go | 7 +-- node/modules/chainindex.go | 1 + 6 files changed, 96 insertions(+), 41 deletions(-) diff --git a/chainindex/indexer.go b/chainindex/indexer.go index ca82449551d..5e9ab8697e5 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -166,7 +166,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip } currTs := head - log.Infof("Starting chain reconciliation from height %d, reconciliationEpoch: %d", head.Height(), reconciliationEpoch) + log.Infof("Starting chain reconciliation from head height %d, reconciliationEpoch: %d", head.Height(), reconciliationEpoch) var missingTipsets []*types.TipSet // The goal here is to walk the canonical chain backwards from head until we find a matching non-reverted tipset @@ -194,9 +194,13 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip if len(missingTipsets) <= si.maxReconcileTipsets { missingTipsets = append(missingTipsets, currTs) + } else if isIndexEmpty { + // if chain index is empty, we can short circuit here as the index has minimum epoch for reconciliation i.e. it is always 0 + break } if currTs.Height() == 0 { + log.Infof("ReconcileWithChain reached genesis but no matching tipset found in index") break } @@ -207,7 +211,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip } } - if currTs == nil { + if currTs == nil && !isIndexEmpty { log.Warn("ReconcileWithChain reached genesis without finding matching tipset") } @@ -228,6 +232,8 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip return xerrors.Errorf("failed to mark events as reverted: %w", err) } + log.Infof("Applying %d missing tipsets to Index; max missing tipset height %d; min missing tipset height %d", len(missingTipsets), + missingTipsets[0].Height(), missingTipsets[len(missingTipsets)-1].Height()) totalIndexed := 0 // apply all missing tipsets from the canonical chain to the current chain head for i := 0; i < len(missingTipsets); i++ { @@ -244,7 +250,9 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip } } else if currTs.Height() == 0 { if err := si.indexTipset(ctx, tx, currTs); err != nil { - log.Warnf("failed to index tipset during reconciliation: %s", err) + log.Warnf("failed to index genesis tipset during reconciliation: %s", err) + } else { + totalIndexed++ } break } @@ -256,9 +264,12 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip // reached the end of what we have in the chainstore if err := si.indexTipset(ctx, tx, currTs); err != nil { log.Warnf("failed to index tipset during reconciliation: %s", err) + } else { + totalIndexed++ } break } + totalIndexed++ } diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 987f27af050..7914752edd0 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -226,7 +226,7 @@ # # type: bool # env var: LOTUS_FEVM_ENABLEETHRPC - #EnableEthRPC = false + #EnableEthRPC = true # EthTraceFilterMaxResults sets the maximum results returned per request by trace_filter # @@ -260,7 +260,7 @@ # # type: bool # env var: LOTUS_EVENTS_ENABLEACTOREVENTSAPI - #EnableActorEventsAPI = false + #EnableActorEventsAPI = true # FilterTTL specifies the time to live for actor event filters. 
Filters that haven't been accessed longer than # this time become eligible for automatic deletion. Filters consume resources, so if they are unused they @@ -294,35 +294,56 @@ [ChainIndexer] - # DisableChainIndexer disables the chain indexer which indexes tipsets, messages and events from chain state. - # Ideally, this should always be set to false as the Indexer is a crucial component for faster Lotus RPC responses. - # Only turn it off if you know what you are doing. + # DisableIndexer controls whether the chain indexer is active. + # The chain indexer is responsible for indexing tipsets, messages, and events from the chain state. + # It is a crucial component for optimizing Lotus RPC response times. + # + # Default: false (indexer is enabled) + # + # Setting this to true will disable the indexer, which may significantly impact RPC performance. + # It is strongly recommended to keep this set to false unless you have a specific reason to disable it + # and fully understand the implications. # # type: bool - # env var: LOTUS_CHAININDEXER_DISABLECHAININDEXER - #DisableChainIndexer = false + # env var: LOTUS_CHAININDEXER_DISABLEINDEXER + #DisableIndexer = false - # GCRetentionDays defines the number of days for which data is retained in the Indexer. - # During the garbage collection (GC) process, data older than this retention period is pruned. - # A value of 0 disables GC, retaining all historical data. - # Default is 0 i.e. GC is disabled by default. + # GCRetentionDays specifies the duration (in days) for which data is retained in the Indexer. + # The garbage collection (GC) process removes data older than this retention period. + # Setting this to 0 disables GC, preserving all historical data indefinitely. + # + # Default: 0 (GC disabled) # # type: int64 # env var: LOTUS_CHAININDEXER_GCRETENTIONDAYS #GCRetentionDays = 0 - # ReconcileEmptyIndex reconciles the index with the chain state even if the Index is empty. - # This is useful when the indexer is not running for a long time and the chain has progressed. - # This will cause the indexer to re-index the entire chain state available on the node. - # Defaults to false. + # ReconcileEmptyIndex determines whether to reconcile the index with the chain state + # during startup when the index is empty. + # + # When set to true: + # - On startup, if the index is empty, the indexer will index the available + # chain state on the node albeit within the MaxReconcileTipsets limit. + # + # When set to false: + # - The indexer will not automatically re-index the chain state on startup if the index is empty. + # + # Default: false + # + # Note: The number of tipsets reconciled (i.e. indexed) during this process can be + # controlled using the MaxReconcileTipsets option. # # type: bool # env var: LOTUS_CHAININDEXER_RECONCILEEMPTYINDEX #ReconcileEmptyIndex = false - # MaxReconcileTipsets limits the number of tipsets to reconcile with the chain. - # This is useful to limit the amount of disk space used by the indexer. - # Defaults to 3 * epochsPerDay i.e. 3 days of chain history. + # MaxReconcileTipsets limits the number of tipsets to reconcile with the chain during startup. + # It represents the maximum number of tipsets to index from the chain state that are absent in the index. + # + # Default: 3 * epochsPerDay (approximately 3 days of chain history) + # + # Note: Setting this value too low may result in incomplete indexing, while setting it too high + # may increase startup time. 
# # type: int # env var: LOTUS_CHAININDEXER_MAXRECONCILETIPSETS diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index b7addc3cda4..1da78d7fbd2 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -73,38 +73,59 @@ your node if metadata log is disabled`, }, "ChainIndexerConfig": { { - Name: "DisableChainIndexer", + Name: "DisableIndexer", Type: "bool", - Comment: `DisableChainIndexer disables the chain indexer which indexes tipsets, messages and events from chain state. -Ideally, this should always be set to false as the Indexer is a crucial component for faster Lotus RPC responses. -Only turn it off if you know what you are doing.`, + Comment: `DisableIndexer controls whether the chain indexer is active. +The chain indexer is responsible for indexing tipsets, messages, and events from the chain state. +It is a crucial component for optimizing Lotus RPC response times. + +Default: false (indexer is enabled) + +Setting this to true will disable the indexer, which may significantly impact RPC performance. +It is strongly recommended to keep this set to false unless you have a specific reason to disable it +and fully understand the implications.`, }, { Name: "GCRetentionDays", Type: "int64", - Comment: `GCRetentionDays defines the number of days for which data is retained in the Indexer. -During the garbage collection (GC) process, data older than this retention period is pruned. -A value of 0 disables GC, retaining all historical data. -Default is 0 i.e. GC is disabled by default.`, + Comment: `GCRetentionDays specifies the duration (in days) for which data is retained in the Indexer. +The garbage collection (GC) process removes data older than this retention period. +Setting this to 0 disables GC, preserving all historical data indefinitely. + +Default: 0 (GC disabled)`, }, { Name: "ReconcileEmptyIndex", Type: "bool", - Comment: `ReconcileEmptyIndex reconciles the index with the chain state even if the Index is empty. -This is useful when the indexer is not running for a long time and the chain has progressed. -This will cause the indexer to re-index the entire chain state available on the node. -Defaults to false.`, + Comment: `ReconcileEmptyIndex determines whether to reconcile the index with the chain state +during startup when the index is empty. + +When set to true: +- On startup, if the index is empty, the indexer will index the available +chain state on the node albeit within the MaxReconcileTipsets limit. + +When set to false: +- The indexer will not automatically re-index the chain state on startup if the index is empty. + +Default: false + +Note: The number of tipsets reconciled (i.e. indexed) during this process can be +controlled using the MaxReconcileTipsets option.`, }, { Name: "MaxReconcileTipsets", Type: "int", - Comment: `MaxReconcileTipsets limits the number of tipsets to reconcile with the chain. -This is useful to limit the amount of disk space used by the indexer. -Defaults to 3 * epochsPerDay i.e. 3 days of chain history.`, + Comment: `MaxReconcileTipsets limits the number of tipsets to reconcile with the chain during startup. +It represents the maximum number of tipsets to index from the chain state that are absent in the index. 
+ +Default: 3 * epochsPerDay (approximately 3 days of chain history) + +Note: Setting this value too low may result in incomplete indexing, while setting it too high +may increase startup time.`, }, }, "Chainstore": { diff --git a/node/config/types.go b/node/config/types.go index cfabdb7e336..30a6e9c7188 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -637,7 +637,7 @@ type ChainIndexerConfig struct { // // When set to true: // - On startup, if the index is empty, the indexer will index the available - // chain state on the node albeit within the `MaxReconcileTipsets` limit. + // chain state on the node albeit within the MaxReconcileTipsets limit. // // When set to false: // - The indexer will not automatically re-index the chain state on startup if the index is empty. @@ -645,7 +645,7 @@ type ChainIndexerConfig struct { // Default: false // // Note: The number of tipsets reconciled (i.e. indexed) during this process can be - // controlled using the `MaxReconcileTipsets` option. + // controlled using the MaxReconcileTipsets option. ReconcileEmptyIndex bool // MaxReconcileTipsets limits the number of tipsets to reconcile with the chain during startup. diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index cd4fa9ce8a4..f3e36f5ce26 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -1602,12 +1602,12 @@ func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, filterSpec return nil, xerrors.Errorf("failed to get tipset by cid: %w", err) } if ts.Height() >= head.Height() { - return nil, xerrors.New("cannot ask for events for a tipset >= head") + return nil, xerrors.New("cannot ask for events for a tipset at or greater than head") } } if pf.minHeight >= head.Height() || pf.maxHeight >= head.Height() { - return nil, xerrors.New("cannot ask for events for a tipset >= head") + return nil, xerrors.New("cannot ask for events for a tipset at or greater than head") } ef := &chainindex.EventFilter{ @@ -1750,7 +1750,8 @@ func (e *EthEventHandler) parseEthFilterSpec(filterSpec *ethtypes.EthFilterSpec) tipsetCid = filterSpec.BlockHash.ToCid() } else { var err error - minHeight, maxHeight, err = parseBlockRange(e.Chain.GetHeaviestTipSet().Height(), filterSpec.FromBlock, filterSpec.ToBlock, e.MaxFilterHeightRange) + // Because of deferred execution, we need to subtract 1 from the heaviest tipset height for the "heaviest" parameter + minHeight, maxHeight, err = parseBlockRange(e.Chain.GetHeaviestTipSet().Height()-1, filterSpec.FromBlock, filterSpec.ToBlock, e.MaxFilterHeightRange) if err != nil { return nil, err } diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index 5a1609c8dc0..ec115412482 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -24,6 +24,7 @@ import ( func ChainIndexer(cfg config.ChainIndexerConfig) func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (chainindex.Indexer, error) { return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (chainindex.Indexer, error) { if cfg.DisableIndexer { + log.Infof("ChainIndexer is disabled") return nil, nil } From fed08b0cdca1ecea9881c8b4fbc32d71c105d1af Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Mon, 9 Sep 2024 18:27:12 +0400 Subject: [PATCH 28/66] fix bugs based on calibnet testing --- chainindex/events.go | 237 +++++++++++++++++++++------------------- chainindex/indexer.go | 5 + chainindex/interface.go | 2 +- 3 files changed, 129 insertions(+), 115 deletions(-) diff --git 
a/chainindex/events.go b/chainindex/events.go index 7e1859b4f6d..6ea487fe3cc 100644 --- a/chainindex/events.go +++ b/chainindex/events.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "database/sql" - "errors" "fmt" "math" "sort" @@ -204,7 +203,7 @@ func (si *SqliteIndexer) checkTipsetIndexedStatus(ctx context.Context, f *EventF } default: // Filter doesn't specify a specific tipset - return ErrNotFound + return nil } // If we couldn't determine a specific tipset, return ErrNotFound @@ -244,142 +243,152 @@ func (si *SqliteIndexer) getTipsetKeyCidByHeight(ctx context.Context, height abi // Returns nil, ErrNotFound if the filter has no matching events and the tipset is not indexed // Returns nil, err for all other errors func (si *SqliteIndexer) GetEventsForFilter(ctx context.Context, f *EventFilter, excludeReverted bool) ([]*CollectedEvent, error) { - if err := si.sanityCheckFilter(ctx, f); err != nil { - return nil, xerrors.Errorf("event filter is invalid: %w", err) - } - - values, query := makePrefillFilterQuery(f, excludeReverted) + getEventsFnc := func(stmt *sql.Stmt, values []any) ([]*CollectedEvent, error) { + q, err := stmt.QueryContext(ctx, values...) + if err != nil { + return nil, xerrors.Errorf("failed to query events: %w", err) + } + defer func() { _ = q.Close() }() - stmt, err := si.db.Prepare(query) - if err != nil { - return nil, xerrors.Errorf("prepare prefill query: %w", err) - } - defer func() { _ = stmt.Close() }() + var ces []*CollectedEvent + var currentID int64 = -1 + var ce *CollectedEvent - q, err := stmt.QueryContext(ctx, values...) - if err == sql.ErrNoRows { - // did not find events, but may be in head, so wait for it and check again - if err := si.waitTillHeadIndexed(ctx); err != nil { - return nil, xerrors.Errorf("failed to wait for head to be indexed: %w", err) - } - q, err = stmt.QueryContext(ctx, values...) 
- } + for q.Next() { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } - if err != nil { - // if no rows are found, we should differentiate between no events for the tipset(which is valid and can happen) - // and the tipset not being indexed - if errors.Is(err, sql.ErrNoRows) { - return nil, si.checkTipsetIndexedStatus(ctx, f) - } - return nil, xerrors.Errorf("failed to query events: %w", err) - } - defer func() { _ = q.Close() }() + var row struct { + id int64 + height uint64 + tipsetKeyCid []byte + emitterAddr []byte + eventIndex int + messageCid []byte + messageIndex int + reverted bool + flags []byte + key string + codec uint64 + value []byte + } - var ces []*CollectedEvent - var currentID int64 = -1 - var ce *CollectedEvent + if err := q.Scan( + &row.id, + &row.height, + &row.tipsetKeyCid, + &row.emitterAddr, + &row.eventIndex, + &row.messageCid, + &row.messageIndex, + &row.reverted, + &row.flags, + &row.key, + &row.codec, + &row.value, + ); err != nil { + return nil, xerrors.Errorf("read prefill row: %w", err) + } - for q.Next() { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } + if row.id != currentID { + if ce != nil { + ces = append(ces, ce) + ce = nil + // Unfortunately we can't easily incorporate the max results limit into the query due to the + // unpredictable number of rows caused by joins + // Break here to stop collecting rows + if f.MaxResults > 0 && len(ces) >= f.MaxResults { + break + } + } - var row struct { - id int64 - height uint64 - tipsetKeyCid []byte - emitterAddr []byte - eventIndex int - messageCid []byte - messageIndex int - reverted bool - flags []byte - key string - codec uint64 - value []byte - } + currentID = row.id + ce = &CollectedEvent{ + EventIdx: row.eventIndex, + Reverted: row.reverted, + Height: abi.ChainEpoch(row.height), + MsgIdx: row.messageIndex, + } - if err := q.Scan( - &row.id, - &row.height, - &row.tipsetKeyCid, - &row.emitterAddr, - &row.eventIndex, - &row.messageCid, - &row.messageIndex, - &row.reverted, - &row.flags, - &row.key, - &row.codec, - &row.value, - ); err != nil { - return nil, xerrors.Errorf("read prefill row: %w", err) - } + ce.EmitterAddr, err = address.NewFromBytes(row.emitterAddr) + if err != nil { + return nil, xerrors.Errorf("parse emitter addr: %w", err) + } - if row.id != currentID { - if ce != nil { - ces = append(ces, ce) - ce = nil - // Unfortunately we can't easily incorporate the max results limit into the query due to the - // unpredictable number of rows caused by joins - // Break here to stop collecting rows - if f.MaxResults > 0 && len(ces) >= f.MaxResults { - break + tsKeyCid, err := cid.Cast(row.tipsetKeyCid) + if err != nil { + return nil, xerrors.Errorf("parse tipsetkey cid: %w", err) } - } - currentID = row.id - ce = &CollectedEvent{ - EventIdx: row.eventIndex, - Reverted: row.reverted, - Height: abi.ChainEpoch(row.height), - MsgIdx: row.messageIndex, - } + ts, err := si.cs.GetTipSetByCid(ctx, tsKeyCid) + if err != nil { + return nil, xerrors.Errorf("get tipset by cid: %w", err) + } - ce.EmitterAddr, err = address.NewFromBytes(row.emitterAddr) - if err != nil { - return nil, xerrors.Errorf("parse emitter addr: %w", err) - } + ce.TipSetKey = ts.Key() - tsKeyCid, err := cid.Cast(row.tipsetKeyCid) - if err != nil { - return nil, xerrors.Errorf("parse tipsetkey cid: %w", err) + ce.MsgCid, err = cid.Cast(row.messageCid) + if err != nil { + return nil, xerrors.Errorf("parse message cid: %w", err) + } } - ts, err := si.cs.GetTipSetByCid(ctx, tsKeyCid) - if err != nil { - 
return nil, xerrors.Errorf("get tipset by cid: %w", err) - } + ce.Entries = append(ce.Entries, types.EventEntry{ + Flags: row.flags[0], + Key: row.key, + Codec: row.codec, + Value: row.value, + }) + } - ce.TipSetKey = ts.Key() + if ce != nil { + ces = append(ces, ce) + } - ce.MsgCid, err = cid.Cast(row.messageCid) - if err != nil { - return nil, xerrors.Errorf("parse message cid: %w", err) - } + if len(ces) == 0 { + return nil, nil } - ce.Entries = append(ce.Entries, types.EventEntry{ - Flags: row.flags[0], - Key: row.key, - Codec: row.codec, - Value: row.value, - }) + // collected event list is in inverted order since we selected only the most recent events + // sort it into height order + sort.Slice(ces, func(i, j int) bool { return ces[i].Height < ces[j].Height }) + + return ces, nil } - if ce != nil { - ces = append(ces, ce) + if err := si.sanityCheckFilter(ctx, f); err != nil { + return nil, xerrors.Errorf("event filter is invalid: %w", err) } - if len(ces) == 0 { - return nil, nil + values, query := makePrefillFilterQuery(f, excludeReverted) + + stmt, err := si.db.Prepare(query) + if err != nil { + return nil, xerrors.Errorf("prepare prefill query: %w", err) } + defer func() { _ = stmt.Close() }() - // collected event list is in inverted order since we selected only the most recent events - // sort it into height order - sort.Slice(ces, func(i, j int) bool { return ces[i].Height < ces[j].Height }) + ces, err := getEventsFnc(stmt, values) + if err != nil { + return nil, xerrors.Errorf("failed to get events: %w", err) + } + if len(ces) == 0 { + // there's no matching events for the filter, wait till index has caught up to the head and then retry + if err := si.waitTillHeadIndexed(ctx); err != nil { + return nil, xerrors.Errorf("failed to wait for head to be indexed: %w", err) + } + ces, err = getEventsFnc(stmt, values) + if err != nil { + return nil, xerrors.Errorf("failed to get events: %w", err) + } + + if len(ces) == 0 { + return nil, si.checkTipsetIndexedStatus(ctx, f) + } + } return ces, nil } diff --git a/chainindex/indexer.go b/chainindex/indexer.go index 5e9ab8697e5..5e4087ed3d0 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -232,6 +232,11 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip return xerrors.Errorf("failed to mark events as reverted: %w", err) } + if len(missingTipsets) == 0 { + log.Info("No missing tipsets to reconcile; index is all caught up with the chain") + return nil + } + log.Infof("Applying %d missing tipsets to Index; max missing tipset height %d; min missing tipset height %d", len(missingTipsets), missingTipsets[0].Height(), missingTipsets[len(missingTipsets)-1].Height()) totalIndexed := 0 diff --git a/chainindex/interface.go b/chainindex/interface.go index 327349d4892..b57a0eac1a6 100644 --- a/chainindex/interface.go +++ b/chainindex/interface.go @@ -15,7 +15,7 @@ import ( "github.com/filecoin-project/lotus/chain/types/ethtypes" ) -var ErrNotFound = errors.New("message not found") +var ErrNotFound = errors.New("required data not found in index") var ErrClosed = errors.New("index closed") // MsgInfo is the Message metadata the index tracks. 
From a5c56c1c8d01c1004f322d4a153b34e07ad74ef8 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Tue, 10 Sep 2024 12:53:52 +0400 Subject: [PATCH 29/66] create sqliite Indices --- chainindex/ddls.go | 6 +++--- chainindex/gc.go | 14 ++++++++++---- chainindex/indexer.go | 6 ------ chainindex/read.go | 12 ++---------- 4 files changed, 15 insertions(+), 23 deletions(-) diff --git a/chainindex/ddls.go b/chainindex/ddls.go index 81173465e15..6a624a4df84 100644 --- a/chainindex/ddls.go +++ b/chainindex/ddls.go @@ -14,8 +14,6 @@ const ( stmtUpdateTipsetToReverted = "UPDATE tipset_message SET reverted = 1 WHERE tipset_key_cid = ?" - stmtGetMaxNonRevertedTipset = "SELECT tipset_key_cid FROM tipset_message WHERE reverted = 0 ORDER BY height DESC LIMIT 1" - stmtRemoveRevertedTipsetsBeforeHeight = "DELETE FROM tipset_message WHERE height < ? AND reverted = 1" stmtRemoveTipsetsBeforeHeight = "DELETE FROM tipset_message WHERE height < ?" @@ -39,7 +37,7 @@ const ( SELECT message_id FROM tipset_message WHERE tipset_key_cid = ? )` - stmtGetMsgIdForMsgCidAndTipset = `SELECT message_id FROM tipset_message WHERE message_cid = ? AND tipset_key_cid = ?` + stmtGetMsgIdForMsgCidAndTipset = `SELECT message_id FROM tipset_message WHERE tipset_key_cid = ? AND message_cid = ?AND reverted = 0` stmtInsertEvent = "INSERT INTO event (message_id, event_index, emitter_addr, reverted) VALUES (?, ?, ?, ?)" stmtInsertEventEntry = "INSERT INTO event_entry (event_id, indexed, flags, key, codec, value) VALUES (?, ?, ?, ?, ?, ?)" @@ -91,4 +89,6 @@ var ddls = []string{ `CREATE INDEX IF NOT EXISTS idx_event_message_id ON event (message_id)`, `CREATE INDEX IF NOT EXISTS idx_height ON tipset_message (height)`, + + `CREATE INDEX IF NOT EXISTS event_entry_event_id ON event_entry(event_id)`, } diff --git a/chainindex/gc.go b/chainindex/gc.go index b9c1a7fad1d..42e407ad416 100644 --- a/chainindex/gc.go +++ b/chainindex/gc.go @@ -41,15 +41,19 @@ func (si *SqliteIndexer) gc(ctx context.Context) { if si.gcRetentionDays <= 0 { return } + log.Info("starting index gc") head := si.cs.GetHeaviestTipSet() retentionEpochs := si.gcRetentionDays * builtin.EpochsInDay removalEpoch := int64(head.Height()) - retentionEpochs - 10 // 10 is for some grace period if removalEpoch <= 0 { + log.Info("no tipsets to gc") return } + log.Infof("gc'ing all(reverted and non-reverted) tipsets before epoch %d", removalEpoch) + res, err := si.removeTipsetsBeforeHeightStmt.ExecContext(ctx, removalEpoch) if err != nil { log.Errorw("failed to remove reverted tipsets before height", "height", removalEpoch, "error", err) @@ -62,13 +66,13 @@ func (si *SqliteIndexer) gc(ctx context.Context) { return } - log.Infow("gc'd tipsets", "height", removalEpoch, "nRows", rows) - + log.Infof("gc'd %d tipsets before epoch %d", rows, removalEpoch) // Also GC eth hashes + log.Infof("gc'ing eth hashes older than %d days", si.gcRetentionDays) res, err = si.removeEthHashesOlderThanStmt.Exec("-" + strconv.Itoa(int(si.gcRetentionDays)) + " day") if err != nil { - log.Errorw("failed to delete eth hashes older than", "error", err) + log.Errorw("failed to gc eth hashes older than", "error", err) return } @@ -78,7 +82,7 @@ func (si *SqliteIndexer) gc(ctx context.Context) { return } - log.Infow("gc'd eth hashes", "height", removalEpoch, "nRows", rows) + log.Infof("gc'd %d eth hashes older than %d days", rows, si.gcRetentionDays) } func (si *SqliteIndexer) cleanupRevertedTipsets(ctx context.Context) { @@ -88,6 +92,8 @@ func (si *SqliteIndexer) cleanupRevertedTipsets(ctx context.Context) { return } + 
log.Infof("cleaning up all reverted tipsets before epoch %d as it is now final", finalEpoch) + // remove all entries from the `tipsets` table where `reverted=true` and height is < finalEpoch // cascade delete based on foreign key constraints takes care of cleaning up the other tables res, err := si.removeRevertedTipsetsBeforeHeightStmt.ExecContext(ctx, finalEpoch) diff --git a/chainindex/indexer.go b/chainindex/indexer.go index 5e4087ed3d0..e80fb2a23f4 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -37,7 +37,6 @@ type SqliteIndexer struct { getMsgCidFromEthHashStmt *sql.Stmt insertTipsetMessageStmt *sql.Stmt updateTipsetToRevertedStmt *sql.Stmt - getMaxNonRevertedTipsetStmt *sql.Stmt hasTipsetStmt *sql.Stmt updateTipsetToNonRevertedStmt *sql.Stmt removeRevertedTipsetsBeforeHeightStmt *sql.Stmt @@ -342,11 +341,6 @@ func (si *SqliteIndexer) prepareStatements() error { return xerrors.Errorf("prepare %s: %w", "updateTipsetToRevertedStmt", err) } - si.getMaxNonRevertedTipsetStmt, err = si.db.Prepare(stmtGetMaxNonRevertedTipset) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "getMaxNonRevertedTipsetStmt", err) - } - si.removeRevertedTipsetsBeforeHeightStmt, err = si.db.Prepare(stmtRemoveRevertedTipsetsBeforeHeight) if err != nil { return xerrors.Errorf("prepare %s: %w", "removeRevertedTipsetsBeforeHeightStmt", err) diff --git a/chainindex/read.go b/chainindex/read.go index 660551683a7..0a38189cfd6 100644 --- a/chainindex/read.go +++ b/chainindex/read.go @@ -109,7 +109,7 @@ func (si *SqliteIndexer) waitTillHeadIndexed(ctx context.Context) error { } // is it already indexed? - if exists, err := si.hasNonRevertedTipset(ctx, headTsKeyCidBytes); err != nil { + if exists, err := si.isTipsetIndexed(ctx, headTsKeyCidBytes); err != nil { return xerrors.Errorf("failed to check if tipset exists: %w", err) } else if exists { return nil @@ -120,7 +120,7 @@ func (si *SqliteIndexer) waitTillHeadIndexed(ctx context.Context) error { defer unsubFn() for ctx.Err() == nil { - exists, err := si.hasNonRevertedTipset(ctx, headTsKeyCidBytes) + exists, err := si.isTipsetIndexed(ctx, headTsKeyCidBytes) if err != nil { return xerrors.Errorf("failed to check if tipset exists: %w", err) } else if exists { @@ -137,14 +137,6 @@ func (si *SqliteIndexer) waitTillHeadIndexed(ctx context.Context) error { return ctx.Err() } -func (si *SqliteIndexer) hasNonRevertedTipset(ctx context.Context, tsKeyCidBytes []byte) (bool, error) { - var exists bool - if err := si.hasNonRevertedTipsetStmt.QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { - return false, xerrors.Errorf("failed to check if tipset is indexed and non-reverted: %w", err) - } - return exists, nil -} - func (si *SqliteIndexer) isTipsetIndexed(ctx context.Context, tsKeyCidBytes []byte) (bool, error) { var exists bool if err := si.hasTipsetStmt.QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { From 7acd481a4cef6f1ac6bb8d4b2bdfecd4d292481c Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Tue, 10 Sep 2024 13:25:40 +0400 Subject: [PATCH 30/66] gc should be based on epochs --- chainindex/gc.go | 25 +++++++++++++++------- chainindex/indexer.go | 6 +++--- documentation/en/default-lotus-config.toml | 6 +++--- node/config/def.go | 2 +- node/config/doc_gen.go | 4 ++-- node/config/types.go | 4 ++-- node/modules/chainindex.go | 2 +- 7 files changed, 29 insertions(+), 20 deletions(-) diff --git a/chainindex/gc.go b/chainindex/gc.go index 42e407ad416..78df5ccd86f 100644 --- a/chainindex/gc.go +++ b/chainindex/gc.go @@ -38,15 
+38,15 @@ func (si *SqliteIndexer) gcLoop() { } func (si *SqliteIndexer) gc(ctx context.Context) { - if si.gcRetentionDays <= 0 { + if si.gcRetentionEpochs <= 0 { + log.Info("gc retention epochs is not set, skipping gc") return } log.Info("starting index gc") head := si.cs.GetHeaviestTipSet() - retentionEpochs := si.gcRetentionDays * builtin.EpochsInDay - removalEpoch := int64(head.Height()) - retentionEpochs - 10 // 10 is for some grace period + removalEpoch := int64(head.Height()) - si.gcRetentionEpochs - 10 // 10 is for some grace period if removalEpoch <= 0 { log.Info("no tipsets to gc") return @@ -67,22 +67,31 @@ func (si *SqliteIndexer) gc(ctx context.Context) { } log.Infof("gc'd %d tipsets before epoch %d", rows, removalEpoch) + + // ------------------------------------------------------------------------------------------------- // Also GC eth hashes - log.Infof("gc'ing eth hashes older than %d days", si.gcRetentionDays) - res, err = si.removeEthHashesOlderThanStmt.Exec("-" + strconv.Itoa(int(si.gcRetentionDays)) + " day") + // Convert gcRetentionEpochs to number of days + gcRetentionDays := si.gcRetentionEpochs / (builtin.EpochsInDay) + if gcRetentionDays < 1 { + log.Infof("skipping gc of eth hashes as retention days is less than 1") + return + } + + log.Infof("gc'ing eth hashes older than %d days", gcRetentionDays) + res, err = si.removeEthHashesOlderThanStmt.Exec("-" + strconv.Itoa(int(gcRetentionDays)) + " day") if err != nil { - log.Errorw("failed to gc eth hashes older than", "error", err) + log.Errorf("failed to gc eth hashes older than %d days: %w", gcRetentionDays, err) return } rows, err = res.RowsAffected() if err != nil { - log.Errorw("failed to get rows affected", "error", err) + log.Errorf("failed to get rows affected: %w", err) return } - log.Infof("gc'd %d eth hashes older than %d days", rows, si.gcRetentionDays) + log.Infof("gc'd %d eth hashes older than %d days", rows, gcRetentionDays) } func (si *SqliteIndexer) cleanupRevertedTipsets(ctx context.Context) { diff --git a/chainindex/indexer.go b/chainindex/indexer.go index e80fb2a23f4..39a0405df44 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -53,7 +53,7 @@ type SqliteIndexer struct { insertEventStmt *sql.Stmt insertEventEntryStmt *sql.Stmt - gcRetentionDays int64 + gcRetentionEpochs int64 reconcileEmptyIndex bool maxReconcileTipsets int @@ -65,7 +65,7 @@ type SqliteIndexer struct { closed bool } -func NewSqliteIndexer(path string, cs ChainStore, gcRetentionDays int64, reconcileEmptyIndex bool, +func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64, reconcileEmptyIndex bool, maxReconcileTipsets int) (si *SqliteIndexer, err error) { db, _, err := sqlite.Open(path) if err != nil { @@ -93,7 +93,7 @@ func NewSqliteIndexer(path string, cs ChainStore, gcRetentionDays int64, reconci cs: cs, updateSubs: make(map[uint64]*updateSub), subIdCounter: 0, - gcRetentionDays: gcRetentionDays, + gcRetentionEpochs: gcRetentionEpochs, reconcileEmptyIndex: reconcileEmptyIndex, maxReconcileTipsets: maxReconcileTipsets, } diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 7914752edd0..d839dcdf329 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -308,15 +308,15 @@ # env var: LOTUS_CHAININDEXER_DISABLEINDEXER #DisableIndexer = false - # GCRetentionDays specifies the duration (in days) for which data is retained in the Indexer. 
+ # GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. # The garbage collection (GC) process removes data older than this retention period. # Setting this to 0 disables GC, preserving all historical data indefinitely. # # Default: 0 (GC disabled) # # type: int64 - # env var: LOTUS_CHAININDEXER_GCRETENTIONDAYS - #GCRetentionDays = 0 + # env var: LOTUS_CHAININDEXER_GCRETENTIONEPOCHS + #GCRetentionEpochs = 0 # ReconcileEmptyIndex determines whether to reconcile the index with the chain state # during startup when the index is empty. diff --git a/node/config/def.go b/node/config/def.go index 79f1013631d..7cf8a29e7a5 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -97,7 +97,7 @@ func DefaultFullNode() *FullNode { }, ChainIndexer: ChainIndexerConfig{ DisableIndexer: false, - GCRetentionDays: 0, + GCRetentionEpochs: 0, ReconcileEmptyIndex: false, MaxReconcileTipsets: 3 * builtin.EpochsInDay, }, diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 1da78d7fbd2..53ea866a26d 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -87,10 +87,10 @@ It is strongly recommended to keep this set to false unless you have a specific and fully understand the implications.`, }, { - Name: "GCRetentionDays", + Name: "GCRetentionEpochs", Type: "int64", - Comment: `GCRetentionDays specifies the duration (in days) for which data is retained in the Indexer. + Comment: `GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. The garbage collection (GC) process removes data older than this retention period. Setting this to 0 disables GC, preserving all historical data indefinitely. diff --git a/node/config/types.go b/node/config/types.go index 30a6e9c7188..74c4c3972e4 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -625,12 +625,12 @@ type ChainIndexerConfig struct { // and fully understand the implications. DisableIndexer bool - // GCRetentionDays specifies the duration (in days) for which data is retained in the Indexer. + // GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. // The garbage collection (GC) process removes data older than this retention period. // Setting this to 0 disables GC, preserving all historical data indefinitely. // // Default: 0 (GC disabled) - GCRetentionDays int64 + GCRetentionEpochs int64 // ReconcileEmptyIndex determines whether to reconcile the index with the chain state // during startup when the index is empty. 
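[Editorial aside, not an additional hunk] A note on sizing GCRetentionEpochs: the retention window is now expressed in epochs, and the GC code above converts it back to days for the eth_tx_hash cleanup using builtin.EpochsInDay. Assuming 30-second epochs (2880 epochs per day), a hypothetical two-week retention would be configured as:

	[ChainIndexer]
	  # 14 days * 2880 epochs/day = 40320 epochs (illustrative value, not a default)
	  GCRetentionEpochs = 40320

Note that a non-zero value below one day's worth of epochs still runs the tipset GC but skips the eth hash GC, because the epochs-to-days conversion above rounds down to zero.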
diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index ec115412482..3f306affaf8 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -35,7 +35,7 @@ func ChainIndexer(cfg config.ChainIndexerConfig) func(lc fx.Lifecycle, mctx help // TODO Implement config driven auto-backfilling chainIndexer, err := chainindex.NewSqliteIndexer(filepath.Join(sqlitePath, chainindex.DefaultDbFilename), - cs, cfg.GCRetentionDays, cfg.ReconcileEmptyIndex, cfg.MaxReconcileTipsets) + cs, cfg.GCRetentionEpochs, cfg.ReconcileEmptyIndex, cfg.MaxReconcileTipsets) if err != nil { return nil, err } From 821dcd4156589f30aeedd8d781ef28f7c5cf2d3a Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Tue, 10 Sep 2024 13:31:57 +0400 Subject: [PATCH 31/66] fix event query --- chainindex/events.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chainindex/events.go b/chainindex/events.go index 6ea487fe3cc..34ab83a6db9 100644 --- a/chainindex/events.go +++ b/chainindex/events.go @@ -67,7 +67,7 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ // read message id for this message cid and tipset key cid var messageID int64 - if err := tx.Stmt(si.getMsgIdForMsgCidAndTipsetStmt).QueryRow(msgCidBytes, msgTsKeyCidBytes).Scan(&messageID); err != nil { + if err := tx.Stmt(si.getMsgIdForMsgCidAndTipsetStmt).QueryRow(msgTsKeyCidBytes, msgCidBytes).Scan(&messageID); err != nil { return xerrors.Errorf("failed to get message id for message cid and tipset key cid: %w", err) } if messageID == 0 { From cde46cbc995fa5027188509fb57ff1662228ba9e Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Tue, 10 Sep 2024 15:10:46 +0400 Subject: [PATCH 32/66] foreign keys should be enabled on the DB --- chainindex/indexer.go | 2 +- lib/sqlite/sqlite.go | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/chainindex/indexer.go b/chainindex/indexer.go index 39a0405df44..edf2f35b361 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -83,7 +83,7 @@ func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64, recon err = sqlite.InitDb(ctx, "chain index", db, ddls, []sqlite.MigrationFunc{}) if err != nil { - return nil, xerrors.Errorf("failed to init message index db: %w", err) + return nil, xerrors.Errorf("failed to init chain index db: %w", err) } si = &SqliteIndexer{ diff --git a/lib/sqlite/sqlite.go b/lib/sqlite/sqlite.go index 0274a0c71f2..039be43233d 100644 --- a/lib/sqlite/sqlite.go +++ b/lib/sqlite/sqlite.go @@ -70,6 +70,16 @@ func Open(path string) (*sql.DB, bool, error) { } } + var foreignKeysEnabled int + if err := db.QueryRow("PRAGMA foreign_keys;").Scan(&foreignKeysEnabled); err != nil { + return nil, false, xerrors.Errorf("failed to check foreign keys setting: %w", err) + } + if foreignKeysEnabled == 0 { + return nil, false, xerrors.Errorf("foreign keys are not enabled for database [@ %s]", path) + } + + log.Infof("Database [@ %s] opened successfully with foreign keys enabled", path) + return db, exists, nil } From 727dae383272e04cdcb71ebcd865923236c643aa Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Tue, 10 Sep 2024 15:17:56 +0400 Subject: [PATCH 33/66] reverted tipsets should be removed as part of GC --- chainindex/ddls.go | 3 +-- chainindex/gc.go | 31 +------------------------------ chainindex/indexer.go | 6 ------ node/modules/chainindex.go | 8 ++++---- 4 files changed, 6 insertions(+), 42 deletions(-) diff --git a/chainindex/ddls.go b/chainindex/ddls.go index 6a624a4df84..76aadb92523 100644 --- 
a/chainindex/ddls.go +++ b/chainindex/ddls.go @@ -14,8 +14,7 @@ const ( stmtUpdateTipsetToReverted = "UPDATE tipset_message SET reverted = 1 WHERE tipset_key_cid = ?" - stmtRemoveRevertedTipsetsBeforeHeight = "DELETE FROM tipset_message WHERE height < ? AND reverted = 1" - stmtRemoveTipsetsBeforeHeight = "DELETE FROM tipset_message WHERE height < ?" + stmtRemoveTipsetsBeforeHeight = "DELETE FROM tipset_message WHERE height < ?" stmtRemoveEthHashesOlderThan = `DELETE FROM eth_tx_hash WHERE inserted_at < datetime('now', ?);` diff --git a/chainindex/gc.go b/chainindex/gc.go index 78df5ccd86f..6b4b1e336d8 100644 --- a/chainindex/gc.go +++ b/chainindex/gc.go @@ -8,19 +8,17 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/lotus/chain/actors/builtin" - "github.com/filecoin-project/lotus/chain/actors/policy" ) var ( log = logging.Logger("chainindex") - cleanupInterval = time.Duration(8) * time.Hour + cleanupInterval = time.Duration(4) * time.Hour ) func (si *SqliteIndexer) gcLoop() { defer si.wg.Done() // Initial cleanup before entering the loop - si.cleanupRevertedTipsets(si.ctx) si.gc(si.ctx) cleanupTicker := time.NewTicker(cleanupInterval) @@ -29,7 +27,6 @@ func (si *SqliteIndexer) gcLoop() { for si.ctx.Err() == nil { select { case <-cleanupTicker.C: - si.cleanupRevertedTipsets(si.ctx) si.gc(si.ctx) case <-si.ctx.Done(): return @@ -93,29 +90,3 @@ func (si *SqliteIndexer) gc(ctx context.Context) { log.Infof("gc'd %d eth hashes older than %d days", rows, gcRetentionDays) } - -func (si *SqliteIndexer) cleanupRevertedTipsets(ctx context.Context) { - head := si.cs.GetHeaviestTipSet() - finalEpoch := (head.Height() - policy.ChainFinality) - 10 // 10 is for some grace period - if finalEpoch <= 0 { - return - } - - log.Infof("cleaning up all reverted tipsets before epoch %d as it is now final", finalEpoch) - - // remove all entries from the `tipsets` table where `reverted=true` and height is < finalEpoch - // cascade delete based on foreign key constraints takes care of cleaning up the other tables - res, err := si.removeRevertedTipsetsBeforeHeightStmt.ExecContext(ctx, finalEpoch) - if err != nil { - log.Errorw("failed to remove reverted tipsets before height", "height", finalEpoch, "error", err) - return - } - - rows, err := res.RowsAffected() - if err != nil { - log.Errorw("failed to get rows affected", "error", err) - return - } - - log.Infow("removed reverted tipsets", "height", finalEpoch, "nRows", rows) -} diff --git a/chainindex/indexer.go b/chainindex/indexer.go index edf2f35b361..5d1b22402e7 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -39,7 +39,6 @@ type SqliteIndexer struct { updateTipsetToRevertedStmt *sql.Stmt hasTipsetStmt *sql.Stmt updateTipsetToNonRevertedStmt *sql.Stmt - removeRevertedTipsetsBeforeHeightStmt *sql.Stmt removeTipsetsBeforeHeightStmt *sql.Stmt removeEthHashesOlderThanStmt *sql.Stmt updateTipsetsToRevertedFromHeightStmt *sql.Stmt @@ -341,11 +340,6 @@ func (si *SqliteIndexer) prepareStatements() error { return xerrors.Errorf("prepare %s: %w", "updateTipsetToRevertedStmt", err) } - si.removeRevertedTipsetsBeforeHeightStmt, err = si.db.Prepare(stmtRemoveRevertedTipsetsBeforeHeight) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "removeRevertedTipsetsBeforeHeightStmt", err) - } - si.removeTipsetsBeforeHeightStmt, err = si.db.Prepare(stmtRemoveTipsetsBeforeHeight) if err != nil { return xerrors.Errorf("prepare %s: %w", "removeTipsetsBeforeHeightStmt", err) diff --git a/node/modules/chainindex.go 
b/node/modules/chainindex.go index 3f306affaf8..dd26e303bb0 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -70,10 +70,6 @@ func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer chainind return *actor.DelegatedAddress, true }) - if err := indexer.Start(); err != nil { - return err - } - ev, err := events.NewEvents(ctx, &evapi) if err != nil { return err @@ -99,6 +95,10 @@ func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer chainind } go chainindex.WaitForMpoolUpdates(ctx, ch, indexer) + if err := indexer.Start(); err != nil { + return err + } + return nil }, }) From c07784d68692b331f95d037aaba807d60013afcc Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Tue, 10 Sep 2024 16:27:02 +0400 Subject: [PATCH 34/66] release read lock --- chainindex/indexer.go | 5 +++++ chainindex/read.go | 2 ++ 2 files changed, 7 insertions(+) diff --git a/chainindex/indexer.go b/chainindex/indexer.go index 5d1b22402e7..52c22097b8d 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -129,6 +129,7 @@ func (si *SqliteIndexer) SetIdToRobustAddrFunc(idToRobustAddrFunc IdToRobustAddr func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.TipSet) error { si.closeLk.RLock() if si.closed { + si.closeLk.RUnlock() return ErrClosed } si.closeLk.RUnlock() @@ -406,6 +407,7 @@ func (si *SqliteIndexer) prepareStatements() error { func (si *SqliteIndexer) IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, msgCid cid.Cid) error { si.closeLk.RLock() if si.closed { + si.closeLk.RUnlock() return ErrClosed } si.closeLk.RUnlock() @@ -421,6 +423,7 @@ func (si *SqliteIndexer) IndexSignedMessage(ctx context.Context, msg *types.Sign } si.closeLk.RLock() if si.closed { + si.closeLk.RUnlock() return ErrClosed } si.closeLk.RUnlock() @@ -457,6 +460,7 @@ func (si *SqliteIndexer) indexEthTxHash(ctx context.Context, tx *sql.Tx, txHash func (si *SqliteIndexer) Apply(ctx context.Context, from, to *types.TipSet) error { si.closeLk.RLock() if si.closed { + si.closeLk.RUnlock() return ErrClosed } si.closeLk.RUnlock() @@ -483,6 +487,7 @@ func (si *SqliteIndexer) Apply(ctx context.Context, from, to *types.TipSet) erro func (si *SqliteIndexer) Revert(ctx context.Context, from, to *types.TipSet) error { si.closeLk.RLock() if si.closed { + si.closeLk.RUnlock() return ErrClosed } si.closeLk.RUnlock() diff --git a/chainindex/read.go b/chainindex/read.go index 0a38189cfd6..8aefb6d6b08 100644 --- a/chainindex/read.go +++ b/chainindex/read.go @@ -18,6 +18,7 @@ const headIndexedWaitTimeout = 5 * time.Second func (si *SqliteIndexer) GetCidFromHash(ctx context.Context, txHash ethtypes.EthHash) (cid.Cid, error) { si.closeLk.RLock() if si.closed { + si.closeLk.RUnlock() return cid.Undef, ErrClosed } si.closeLk.RUnlock() @@ -45,6 +46,7 @@ func (si *SqliteIndexer) queryMsgCidFromEthHash(ctx context.Context, txHash etht func (si *SqliteIndexer) GetMsgInfo(ctx context.Context, messageCid cid.Cid) (*MsgInfo, error) { si.closeLk.RLock() if si.closed { + si.closeLk.RUnlock() return nil, ErrClosed } si.closeLk.RUnlock() From 896048a8d511e101d5ff934e0010d3e5c89b2581 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Tue, 10 Sep 2024 16:58:50 +0400 Subject: [PATCH 35/66] make it easy to backfill an empty index using reconciliation --- chainindex/indexer.go | 170 --------------------------------- chainindex/reconcile.go | 207 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 207 insertions(+), 170 deletions(-) create mode 100644 chainindex/reconcile.go 
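[Editorial aside, not an additional hunk] The diffs below for this patch move ReconcileWithChain out of indexer.go into the new reconcile.go and split the empty-index case into a dedicated backfillEmptyIndex path. Condensed from the added code, the top-level decision is:

	isIndexEmpty := !hasTipset
	if isIndexEmpty && !si.reconcileEmptyIndex {
		// nothing indexed and backfill-on-empty is disabled
		return nil
	}
	if isIndexEmpty {
		// walk back from head, collecting at most maxReconcileTipsets, then index them
		return si.backfillEmptyIndex(ctx, tx, head)
	}
	// otherwise walk the canonical chain back to the minimum non-reverted height,
	// mark everything above the last matching tipset as reverted, and apply the
	// missing tipsets via applyMissingTipsets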
diff --git a/chainindex/indexer.go b/chainindex/indexer.go index 52c22097b8d..a936b49a30a 100644 --- a/chainindex/indexer.go +++ b/chainindex/indexer.go @@ -113,176 +113,6 @@ func (si *SqliteIndexer) SetIdToRobustAddrFunc(idToRobustAddrFunc IdToRobustAddr si.idToRobustAddrFunc = idToRobustAddrFunc } -// ReconcileWithChain ensures that the index is consistent with the current chain state. -// It performs the following steps: -// 1. Checks if the index is empty. If so, it returns immediately as there's nothing to reconcile. -// 2. Finds the lowest non-reverted height in the index. -// 3. Walks backwards from the current chain head until it finds a tipset that exists -// in the index and is not marked as reverted. -// 4. Sets a boundary epoch just above this found tipset. -// 5. Marks all tipsets above this boundary as reverted, ensuring consistency with the current chain state. -// 6. Applies all missing un-indexed tipsets starting from the last matching tipset b/w index and canonical chain -// to the current chain head. -// -// This function is crucial for maintaining index integrity, especially after chain reorgs. -// It ensures that the index accurately reflects the current state of the blockchain. -func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.TipSet) error { - si.closeLk.RLock() - if si.closed { - si.closeLk.RUnlock() - return ErrClosed - } - si.closeLk.RUnlock() - - if head == nil { - return nil - } - - return withTx(ctx, si.db, func(tx *sql.Tx) error { - var hasTipset bool - err := tx.StmtContext(ctx, si.isTipsetMessageNonEmptyStmt).QueryRowContext(ctx).Scan(&hasTipset) - if err != nil { - return xerrors.Errorf("failed to check if tipset message is empty: %w", err) - } - - isIndexEmpty := !hasTipset - if isIndexEmpty && !si.reconcileEmptyIndex { - log.Info("Chain index is empty and reconcileEmptyIndex is disabled; skipping reconciliation") - return nil - } - - // Find the minimum applied tipset in the index; this will mark the absolute min height of the reconciliation walk - var reconciliationEpoch abi.ChainEpoch - if isIndexEmpty { - reconciliationEpoch = 0 - } else { - var result int64 - row := tx.StmtContext(ctx, si.getMinNonRevertedHeightStmt).QueryRowContext(ctx) - if err := row.Scan(&result); err != nil { - return xerrors.Errorf("failed to scan minimum non-reverted height %w", err) - } - reconciliationEpoch = abi.ChainEpoch(result) - } - - currTs := head - log.Infof("Starting chain reconciliation from head height %d, reconciliationEpoch: %d", head.Height(), reconciliationEpoch) - var missingTipsets []*types.TipSet - - // The goal here is to walk the canonical chain backwards from head until we find a matching non-reverted tipset - // in the db so we know where to start reconciliation from - // All tipsets that exist in the DB but not in the canonical chain are then marked as reverted - // All tpsets that exist in the canonical chain but not in the db are then applied - for currTs != nil && currTs.Height() >= reconciliationEpoch { - tsKeyCidBytes, err := toTipsetKeyCidBytes(currTs) - if err != nil { - return xerrors.Errorf("failed to compute tipset cid: %w", err) - } - - var exists bool - err = tx.StmtContext(ctx, si.hasNonRevertedTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists) - if err != nil { - return xerrors.Errorf("failed to check if tipset exists and is not reverted: %w", err) - } - - if exists { - // found it! 
- reconciliationEpoch = currTs.Height() + 1 - log.Infof("Found matching tipset at height %d, setting reconciliation epoch to %d", currTs.Height(), reconciliationEpoch) - break - } - - if len(missingTipsets) <= si.maxReconcileTipsets { - missingTipsets = append(missingTipsets, currTs) - } else if isIndexEmpty { - // if chain index is empty, we can short circuit here as the index has minimum epoch for reconciliation i.e. it is always 0 - break - } - - if currTs.Height() == 0 { - log.Infof("ReconcileWithChain reached genesis but no matching tipset found in index") - break - } - - parents := currTs.Parents() - currTs, err = si.cs.GetTipSetFromKey(ctx, parents) - if err != nil { - return xerrors.Errorf("failed to walk chain: %w", err) - } - } - - if currTs == nil && !isIndexEmpty { - log.Warn("ReconcileWithChain reached genesis without finding matching tipset") - } - - // mark all tipsets from the reconciliation epoch onwards in the Index as reverted as they are not in the current canonical chain - log.Infof("Marking tipsets as reverted from height %d", reconciliationEpoch) - result, err := tx.StmtContext(ctx, si.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch)) - if err != nil { - return xerrors.Errorf("failed to mark tipsets as reverted: %w", err) - } - rowsAffected, err := result.RowsAffected() - if err != nil { - return xerrors.Errorf("failed to get number of rows affected: %w", err) - } - log.Infof("Marked %d tipsets as reverted from height %d", rowsAffected, reconciliationEpoch) - - // also need to mark events as reverted for the corresponding inclusion tipsets - if _, err = tx.StmtContext(ctx, si.updateEventsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch-1)); err != nil { - return xerrors.Errorf("failed to mark events as reverted: %w", err) - } - - if len(missingTipsets) == 0 { - log.Info("No missing tipsets to reconcile; index is all caught up with the chain") - return nil - } - - log.Infof("Applying %d missing tipsets to Index; max missing tipset height %d; min missing tipset height %d", len(missingTipsets), - missingTipsets[0].Height(), missingTipsets[len(missingTipsets)-1].Height()) - totalIndexed := 0 - // apply all missing tipsets from the canonical chain to the current chain head - for i := 0; i < len(missingTipsets); i++ { - currTs := missingTipsets[i] - var parentTs *types.TipSet - var err error - - if i < len(missingTipsets)-1 { - parentTs = missingTipsets[i+1] - } else if currTs.Height() > 0 { - parentTs, err = si.cs.GetTipSetFromKey(ctx, currTs.Parents()) - if err != nil { - return xerrors.Errorf("failed to get parent tipset: %w", err) - } - } else if currTs.Height() == 0 { - if err := si.indexTipset(ctx, tx, currTs); err != nil { - log.Warnf("failed to index genesis tipset during reconciliation: %s", err) - } else { - totalIndexed++ - } - break - } - - if err := si.indexTipsetWithParentEvents(ctx, tx, parentTs, currTs); err != nil { - log.Warnf("failed to index tipset with parent events during reconciliation: %s", err) - // the above could have failed because of missing messages for `parentTs` in the chainstore - // so try to index only the currentTs and then halt the reconciliation process as we've - // reached the end of what we have in the chainstore - if err := si.indexTipset(ctx, tx, currTs); err != nil { - log.Warnf("failed to index tipset during reconciliation: %s", err) - } else { - totalIndexed++ - } - break - } - - totalIndexed++ - } - - log.Infof("Indexed %d missing tipsets during reconciliation", 
totalIndexed) - - return nil - }) -} - func (si *SqliteIndexer) Close() error { si.closeLk.Lock() defer si.closeLk.Unlock() diff --git a/chainindex/reconcile.go b/chainindex/reconcile.go new file mode 100644 index 00000000000..b1d4ebeaf8a --- /dev/null +++ b/chainindex/reconcile.go @@ -0,0 +1,207 @@ +package chainindex + +import ( + "context" + "database/sql" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/types" +) + +// ReconcileWithChain ensures that the index is consistent with the current chain state. +// It performs the following steps: +// 1. Checks if the index is empty. If so, it returns immediately as there's nothing to reconcile. +// 2. Finds the lowest non-reverted height in the index. +// 3. Walks backwards from the current chain head until it finds a tipset that exists +// in the index and is not marked as reverted. +// 4. Sets a boundary epoch just above this found tipset. +// 5. Marks all tipsets above this boundary as reverted, ensuring consistency with the current chain state. +// 6. Applies all missing un-indexed tipsets starting from the last matching tipset b/w index and canonical chain +// to the current chain head. +// +// This function is crucial for maintaining index integrity, especially after chain reorgs. +// It ensures that the index accurately reflects the current state of the blockchain. +func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.TipSet) error { + si.closeLk.RLock() + if si.closed { + si.closeLk.RUnlock() + return ErrClosed + } + si.closeLk.RUnlock() + + if head == nil { + return nil + } + + return withTx(ctx, si.db, func(tx *sql.Tx) error { + var hasTipset bool + err := tx.StmtContext(ctx, si.isTipsetMessageNonEmptyStmt).QueryRowContext(ctx).Scan(&hasTipset) + if err != nil { + return xerrors.Errorf("failed to check if tipset message is empty: %w", err) + } + + isIndexEmpty := !hasTipset + if isIndexEmpty && !si.reconcileEmptyIndex { + log.Info("Chain index is empty and reconcileEmptyIndex is disabled; skipping reconciliation") + return nil + } + + if isIndexEmpty { + log.Info("Chain index is empty; backfilling from head") + return si.backfillEmptyIndex(ctx, tx, head) + } + + // Find the minimum applied tipset in the index; this will mark the absolute min height of the reconciliation walk + var reconciliationEpoch abi.ChainEpoch + row := tx.StmtContext(ctx, si.getMinNonRevertedHeightStmt).QueryRowContext(ctx) + if err := row.Scan(&reconciliationEpoch); err != nil { + return xerrors.Errorf("failed to scan minimum non-reverted height: %w", err) + } + + currTs := head + log.Infof("Starting chain reconciliation from head height %d, reconciliationEpoch: %d", head.Height(), reconciliationEpoch) + var missingTipsets []*types.TipSet + + // The goal here is to walk the canonical chain backwards from head until we find a matching non-reverted tipset + // in the db so we know where to start reconciliation from + // All tipsets that exist in the DB but not in the canonical chain are then marked as reverted + // All tpsets that exist in the canonical chain but not in the db are then applied + for currTs != nil && currTs.Height() >= reconciliationEpoch { + tsKeyCidBytes, err := toTipsetKeyCidBytes(currTs) + if err != nil { + return xerrors.Errorf("failed to compute tipset cid: %w", err) + } + + var exists bool + err = tx.StmtContext(ctx, si.hasNonRevertedTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists) + if err != nil { + return 
xerrors.Errorf("failed to check if tipset exists and is not reverted: %w", err) + } + + if exists { + // found it! + reconciliationEpoch = currTs.Height() + 1 + log.Infof("Found matching tipset at height %d, setting reconciliation epoch to %d", currTs.Height(), reconciliationEpoch) + break + } + + if len(missingTipsets) <= si.maxReconcileTipsets { + missingTipsets = append(missingTipsets, currTs) + } + // even if len(missingTipsets) > si.maxReconcileTipsets, we still need to continue the walk + // to find the reconciliation epoch so we can mark the indexed tipsets not in the main chain as reverted + + if currTs.Height() == 0 { + log.Infof("ReconcileWithChain reached genesis but no matching tipset found in index") + break + } + + parents := currTs.Parents() + currTs, err = si.cs.GetTipSetFromKey(ctx, parents) + if err != nil { + return xerrors.Errorf("failed to walk chain: %w", err) + } + } + + if currTs.Height() == 0 { + log.Warn("ReconcileWithChain reached genesis without finding matching tipset") + } + + // mark all tipsets from the reconciliation epoch onwards in the Index as reverted as they are not in the current canonical chain + log.Infof("Marking tipsets as reverted from height %d", reconciliationEpoch) + result, err := tx.StmtContext(ctx, si.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch)) + if err != nil { + return xerrors.Errorf("failed to mark tipsets as reverted: %w", err) + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return xerrors.Errorf("failed to get number of rows affected: %w", err) + } + log.Infof("Marked %d tipsets as reverted from height %d", rowsAffected, reconciliationEpoch) + + // also need to mark events as reverted for the corresponding inclusion tipsets + if _, err = tx.StmtContext(ctx, si.updateEventsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch-1)); err != nil { + return xerrors.Errorf("failed to mark events as reverted: %w", err) + } + + return si.applyMissingTipsets(ctx, tx, missingTipsets) + }) +} + +func (si *SqliteIndexer) backfillEmptyIndex(ctx context.Context, tx *sql.Tx, head *types.TipSet) error { + currTs := head + var missingTipsets []*types.TipSet + + log.Infof("backfilling empty chain index from head height %d", head.Height()) + var err error + + for currTs != nil && len(missingTipsets) < si.maxReconcileTipsets { + missingTipsets = append(missingTipsets, currTs) + if currTs.Height() == 0 { + break + } + + currTs, err = si.cs.GetTipSetFromKey(ctx, currTs.Parents()) + if err != nil { + return xerrors.Errorf("failed to walk chain: %w", err) + } + } + + return si.applyMissingTipsets(ctx, tx, missingTipsets) +} + +func (si *SqliteIndexer) applyMissingTipsets(ctx context.Context, tx *sql.Tx, missingTipsets []*types.TipSet) error { + if len(missingTipsets) == 0 { + log.Info("No missing tipsets to index; index is all caught up with the chain") + return nil + } + + log.Infof("Applying %d missing tipsets to Index; max missing tipset height %d; min missing tipset height %d", len(missingTipsets), + missingTipsets[0].Height(), missingTipsets[len(missingTipsets)-1].Height()) + totalIndexed := 0 + + // apply all missing tipsets from the canonical chain to the current chain head + for i := 0; i < len(missingTipsets); i++ { + currTs := missingTipsets[i] + var parentTs *types.TipSet + var err error + + if i < len(missingTipsets)-1 { + parentTs = missingTipsets[i+1] + } else if currTs.Height() > 0 { + parentTs, err = si.cs.GetTipSetFromKey(ctx, currTs.Parents()) + if err != nil { + 
return xerrors.Errorf("failed to get parent tipset: %w", err) + } + } else if currTs.Height() == 0 { + if err := si.indexTipset(ctx, tx, currTs); err != nil { + log.Warnf("failed to index genesis tipset during reconciliation: %s", err) + } else { + totalIndexed++ + } + break + } + + if err := si.indexTipsetWithParentEvents(ctx, tx, parentTs, currTs); err != nil { + log.Warnf("failed to index tipset with parent events during reconciliation: %s", err) + // the above could have failed because of missing messages for `parentTs` in the chainstore + // so try to index only the currentTs and then halt the reconciliation process as we've + // reached the end of what we have in the chainstore + if err := si.indexTipset(ctx, tx, currTs); err != nil { + log.Warnf("failed to index tipset during reconciliation: %s", err) + } else { + totalIndexed++ + } + break + } + + totalIndexed++ + } + + log.Infof("Indexed %d missing tipsets during reconciliation", totalIndexed) + + return nil +} From 602f6602445d574232ca7284135f54ee1c0625af Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Tue, 10 Sep 2024 17:24:08 +0400 Subject: [PATCH 36/66] better docs for reconciliation --- chainindex/reconcile.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/chainindex/reconcile.go b/chainindex/reconcile.go index b1d4ebeaf8a..213cea7b81a 100644 --- a/chainindex/reconcile.go +++ b/chainindex/reconcile.go @@ -69,6 +69,10 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip // in the db so we know where to start reconciliation from // All tipsets that exist in the DB but not in the canonical chain are then marked as reverted // All tpsets that exist in the canonical chain but not in the db are then applied + + // we only need to walk back as far as the reconciliation epoch as all the tipsets in the index + // below the reconciliation epoch are already marked as reverted because the reconciliation epoch + // is the minimum non-reverted height in the index for currTs != nil && currTs.Height() >= reconciliationEpoch { tsKeyCidBytes, err := toTipsetKeyCidBytes(currTs) if err != nil { @@ -88,11 +92,11 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip break } - if len(missingTipsets) <= si.maxReconcileTipsets { + if len(missingTipsets) < si.maxReconcileTipsets { missingTipsets = append(missingTipsets, currTs) } - // even if len(missingTipsets) > si.maxReconcileTipsets, we still need to continue the walk - // to find the reconciliation epoch so we can mark the indexed tipsets not in the main chain as reverted + // even if len(missingTipsets) >= si.maxReconcileTipsets, we still need to continue the walk + // to find the final reconciliation epoch so we can mark the indexed tipsets not in the main chain as reverted if currTs.Height() == 0 { log.Infof("ReconcileWithChain reached genesis but no matching tipset found in index") From 13c28246d9341f508fb421b23046f3c4b00ee06b Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Thu, 12 Sep 2024 15:10:43 +0400 Subject: [PATCH 37/66] fix conflicts with master --- go.mod | 2 +- go.sum | 52 +++++++++++++++++++++---------------------- node/builder_chain.go | 3 --- node/impl/full/eth.go | 25 ++++++++++++++++----- 4 files changed, 47 insertions(+), 35 deletions(-) diff --git a/go.mod b/go.mod index 2ef8195c6a8..15c6557ec49 100644 --- a/go.mod +++ b/go.mod @@ -114,7 +114,6 @@ require ( github.com/manifoldco/promptui v0.9.0 github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-sqlite3 v1.14.22 - 
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.12.4 @@ -269,6 +268,7 @@ require ( github.com/miekg/dns v1.1.59 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect diff --git a/go.sum b/go.sum index dc634011ca8..193643a51b6 100644 --- a/go.sum +++ b/go.sum @@ -270,8 +270,8 @@ github.com/filecoin-project/go-crypto v0.1.0 h1:Pob2MphoipMbe/ksxZOMcQvmBHAd3sI/ github.com/filecoin-project/go-crypto v0.1.0/go.mod h1:K9UFXvvoyAVvB+0Le7oGlKiT9mgA5FHOJdYQXEE8IhI= github.com/filecoin-project/go-f3 v0.2.0 h1:Gis44+hOrDjSUEw3IDmU7CudNILi5e+bb1pgZgp680k= github.com/filecoin-project/go-f3 v0.2.0/go.mod h1:43fBLX0iX0+Nnw4Z91wSrdfDYAd6YEDexy7GcLnIJtk= -github.com/filecoin-project/go-fil-commcid v0.2.0 h1:B+5UX8XGgdg/XsdUpST4pEBviKkFOw+Fvl2bLhSKGpI= -github.com/filecoin-project/go-fil-commcid v0.2.0/go.mod h1:8yigf3JDIil+/WpqR5zoKyP0jBPCOGtEqq/K1CcMy9Q= +github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= +github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commp-hashhash v0.2.0 h1:HYIUugzjq78YvV3vC6rL95+SfC/aSTVSnZSZiDV5pCk= github.com/filecoin-project/go-fil-commp-hashhash v0.2.0/go.mod h1:VH3fAFOru4yyWar4626IoS5+VGE8SfZiBODJLUigEo4= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= @@ -1258,8 +1258,8 @@ github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JT github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/triplewz/poseidon v0.0.1 h1:G5bdkTzb9R5K5Dd3DIzBCp7rAErP1zWH0LW7Ip6bxIA= -github.com/triplewz/poseidon v0.0.1/go.mod h1:QYG1d0B4YZD7TgF6qZndTTu4rxUGFCCZAQRDanDj+9c= +github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed h1:C8H2ql+vCBhEi7d3vMBBbdCAKv9s/thfPyLEuSvFpMU= +github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed/go.mod h1:QYG1d0B4YZD7TgF6qZndTTu4rxUGFCCZAQRDanDj+9c= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= @@ -1439,8 +1439,8 @@ golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98y golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod 
h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1451,8 +1451,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk= -golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1477,8 +1477,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1539,8 +1539,8 @@ golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1565,8 +1565,8 @@ 
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1658,8 +1658,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1673,8 +1673,8 @@ golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1688,14 +1688,14 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod 
h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1751,14 +1751,14 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= -golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk= +golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= diff --git a/node/builder_chain.go b/node/builder_chain.go index 552b253045d..2f0be6d503f 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -141,7 +141,6 @@ var ChainNode = Options( Override(new(full.StateModuleAPI), From(new(api.Gateway))), Override(new(stmgr.StateManagerAPI), rpcstmgr.NewRPCStateManager), Override(new(full.EthModuleAPI), From(new(api.Gateway))), - Override(new(full.EthTxHashManager), &full.EthTxHashManagerDummy{}), Override(new(full.EthEventAPI), From(new(api.Gateway))), Override(new(full.ActorEventAPI), 
From(new(api.Gateway))), ), @@ -262,14 +261,12 @@ func ConfigFullNode(c interface{}) Option { If(cfg.Fevm.EnableEthRPC, Override(new(*full.EthEventHandler), modules.EthEventHandler(cfg.Events, cfg.Fevm.EnableEthRPC)), - Override(new(full.EthTxHashManager), modules.EthTxHashManager(cfg.Fevm)), Override(new(full.EthModuleAPI), modules.EthModuleAPI(cfg.Fevm)), Override(new(full.EthEventAPI), From(new(*full.EthEventHandler))), ), If(!cfg.Fevm.EnableEthRPC, Override(new(full.EthModuleAPI), &full.EthModuleDummy{}), Override(new(full.EthEventAPI), &full.EthModuleDummy{}), - Override(new(full.EthTxHashManager), &full.EthTxHashManagerDummy{}), ), If(cfg.Events.EnableActorEventsAPI, diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index f3e36f5ce26..8a989058725 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -165,6 +165,8 @@ type EthAPI struct { Chain *store.ChainStore StateManager *stmgr.StateManager + ChainIndexer chainindex.Indexer + MpoolAPI MpoolAPI EthModuleAPI EthEventAPI @@ -938,6 +940,14 @@ func (a *EthModule) EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error) } func (a *EthModule) EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) { + return ethSendRawTransaction(ctx, a.MpoolAPI, a.ChainIndexer, rawTx, false) +} + +func (a *EthAPI) EthSendRawTransactionUntrusted(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) { + return ethSendRawTransaction(ctx, a.MpoolAPI, a.ChainIndexer, rawTx, true) +} + +func ethSendRawTransaction(ctx context.Context, mpool MpoolAPI, indexer chainindex.Indexer, rawTx ethtypes.EthBytes, untrusted bool) (ethtypes.EthHash, error) { txArgs, err := ethtypes.ParseEthTransaction(rawTx) if err != nil { return ethtypes.EmptyEthHash, err @@ -953,15 +963,20 @@ func (a *EthModule) EthSendRawTransaction(ctx context.Context, rawTx ethtypes.Et return ethtypes.EmptyEthHash, err } - _, err = a.MpoolAPI.MpoolPush(ctx, smsg) - if err != nil { - return ethtypes.EmptyEthHash, err + if untrusted { + if _, err = mpool.MpoolPushUntrusted(ctx, smsg); err != nil { + return ethtypes.EmptyEthHash, err + } + } else { + if _, err = mpool.MpoolPush(ctx, smsg); err != nil { + return ethtypes.EmptyEthHash, err + } } // make it immediately available in the transaction hash lookup db, even though it will also // eventually get there via the mpool - if a.ChainIndexer != nil { - if err := a.ChainIndexer.IndexEthTxHash(ctx, txHash, smsg.Cid()); err != nil { + if indexer != nil { + if err := indexer.IndexEthTxHash(ctx, txHash, smsg.Cid()); err != nil { log.Errorf("error indexing tx: %s", err) } } From 37d67464996f7d752ee87011c820917c4e1e7074 Mon Sep 17 00:00:00 2001 From: Aarsh Shah Date: Fri, 13 Sep 2024 16:28:51 +0400 Subject: [PATCH 38/66] Apply suggestions from code review Co-authored-by: Rod Vagg --- chain/gen/gen.go | 3 +-- chainindex/ddls.go | 4 ++-- chainindex/events.go | 2 +- chainindex/gc.go | 2 +- chainindex/helpers.go | 1 - chainindex/interface.go | 2 +- chainindex/reconcile.go | 8 ++++--- documentation/en/default-lotus-config.toml | 26 ++++++++++++++-------- 8 files changed, 28 insertions(+), 20 deletions(-) diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 00ea779887b..b05a14a5d55 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -257,8 +257,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS //return nil, xerrors.Errorf("creating drand beacon: %w", err) //} - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), 
sys, us, beac, ds, - nil) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds, nil) if err != nil { return nil, xerrors.Errorf("initing stmgr: %w", err) } diff --git a/chainindex/ddls.go b/chainindex/ddls.go index 76aadb92523..4937972e3f3 100644 --- a/chainindex/ddls.go +++ b/chainindex/ddls.go @@ -62,8 +62,8 @@ var ddls = []string{ `CREATE TABLE IF NOT EXISTS event ( event_id INTEGER PRIMARY KEY, message_id INTEGER NOT NULL, - event_index INTEGER NOT NULL, - emitter_addr BLOB NOT NULL, + event_index INTEGER NOT NULL, + emitter_addr BLOB NOT NULL, reverted INTEGER NOT NULL, FOREIGN KEY (message_id) REFERENCES tipset_message(message_id) ON DELETE CASCADE, UNIQUE (message_id, event_index) diff --git a/chainindex/events.go b/chainindex/events.go index 34ab83a6db9..079c2470787 100644 --- a/chainindex/events.go +++ b/chainindex/events.go @@ -496,6 +496,6 @@ func makePrefillFilterQuery(f *EventFilter, excludeReverted bool) ([]any, string } // retain insertion order of event_entry rows - s += " ORDER BY tm.height DESC, ee.rowid ASC" + s += " ORDER BY tm.height DESC, ee._rowid_ ASC" return values, s } diff --git a/chainindex/gc.go b/chainindex/gc.go index 6b4b1e336d8..f82c151b450 100644 --- a/chainindex/gc.go +++ b/chainindex/gc.go @@ -49,7 +49,7 @@ func (si *SqliteIndexer) gc(ctx context.Context) { return } - log.Infof("gc'ing all(reverted and non-reverted) tipsets before epoch %d", removalEpoch) + log.Infof("gc'ing all (reverted and non-reverted) tipsets before epoch %d", removalEpoch) res, err := si.removeTipsetsBeforeHeightStmt.ExecContext(ctx, removalEpoch) if err != nil { diff --git a/chainindex/helpers.go b/chainindex/helpers.go index fcc10de0e60..c4801af85ef 100644 --- a/chainindex/helpers.go +++ b/chainindex/helpers.go @@ -81,7 +81,6 @@ func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, indexer } } -// revert function for observer func toTipsetKeyCidBytes(ts *types.TipSet) ([]byte, error) { tsKeyCid, err := ts.Key().Cid() if err != nil { diff --git a/chainindex/interface.go b/chainindex/interface.go index b57a0eac1a6..f54036be209 100644 --- a/chainindex/interface.go +++ b/chainindex/interface.go @@ -15,7 +15,7 @@ import ( "github.com/filecoin-project/lotus/chain/types/ethtypes" ) -var ErrNotFound = errors.New("required data not found in index") +var ErrNotFound = errors.New("not found in index") var ErrClosed = errors.New("index closed") // MsgInfo is the Message metadata the index tracks. 
diff --git a/chainindex/reconcile.go b/chainindex/reconcile.go index 213cea7b81a..8e14ba081c4 100644 --- a/chainindex/reconcile.go +++ b/chainindex/reconcile.go @@ -62,7 +62,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip } currTs := head - log.Infof("Starting chain reconciliation from head height %d, reconciliationEpoch: %d", head.Height(), reconciliationEpoch) + log.Infof("Starting chain reconciliation from head height %d; searching for base reconciliation height above %d)", head.Height(), reconciliationEpoch) var missingTipsets []*types.TipSet // The goal here is to walk the canonical chain backwards from head until we find a matching non-reverted tipset @@ -124,13 +124,14 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip if err != nil { return xerrors.Errorf("failed to get number of rows affected: %w", err) } - log.Infof("Marked %d tipsets as reverted from height %d", rowsAffected, reconciliationEpoch) // also need to mark events as reverted for the corresponding inclusion tipsets if _, err = tx.StmtContext(ctx, si.updateEventsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch-1)); err != nil { return xerrors.Errorf("failed to mark events as reverted: %w", err) } + log.Infof("Marked %d tipsets as reverted from height %d", rowsAffected, reconciliationEpoch) + return si.applyMissingTipsets(ctx, tx, missingTipsets) }) } @@ -139,7 +140,7 @@ func (si *SqliteIndexer) backfillEmptyIndex(ctx context.Context, tx *sql.Tx, hea currTs := head var missingTipsets []*types.TipSet - log.Infof("backfilling empty chain index from head height %d", head.Height()) + log.Infof("Backfilling empty chain index from head height %d", head.Height()) var err error for currTs != nil && len(missingTipsets) < si.maxReconcileTipsets { @@ -174,6 +175,7 @@ func (si *SqliteIndexer) applyMissingTipsets(ctx context.Context, tx *sql.Tx, mi var err error if i < len(missingTipsets)-1 { + // a caller must supply a reverse-ordered contiguous list of missingTipsets parentTs = missingTipsets[i+1] } else if currTs.Height() > 0 { parentTs, err = si.cs.GetTipSetFromKey(ctx, currTs.Parents()) diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index d839dcdf329..9833a915edb 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -318,20 +318,23 @@ # env var: LOTUS_CHAININDEXER_GCRETENTIONEPOCHS #GCRetentionEpochs = 0 - # ReconcileEmptyIndex determines whether to reconcile the index with the chain state - # during startup when the index is empty. + # ReconcileEmptyIndex determines whether to reconcile the index with historic + # chain state during startup when the index is empty. This is limited by the + # amount of chain state available, and the MaxReconcileTipsets option. # - # When set to true: - # - On startup, if the index is empty, the indexer will index the available - # chain state on the node albeit within the MaxReconcileTipsets limit. + # When set to true: historic indexing is desirable and the index is empty. + # On startup, the indexer will index the available chain state on the node, + # albeit within the MaxReconcileTipsets limit and the available chain state. # - # When set to false: - # - The indexer will not automatically re-index the chain state on startup if the index is empty. + # When set to false: historic indexing is not needed. 
The indexer will not + # automatically re-index historic chain state on startup if the index is + # empty, but will instead only index from the point that indexing is turned + # on. # # Default: false # - # Note: The number of tipsets reconciled (i.e. indexed) during this process can be - # controlled using the MaxReconcileTipsets option. + # Note: The number of tipsets reconciled (i.e. indexed) during this process + # can be controlled using the MaxReconcileTipsets option. # # type: bool # env var: LOTUS_CHAININDEXER_RECONCILEEMPTYINDEX @@ -339,6 +342,11 @@ # MaxReconcileTipsets limits the number of tipsets to reconcile with the chain during startup. # It represents the maximum number of tipsets to index from the chain state that are absent in the index. + # + # Reconciliation generally only involves checking the latest tipset, but in + # the case of an empty index, or an index that has not been actively + # maintained (such as indexing being turned off for a period). This option + # limits the number of historic tipsets to reconcile with the chain state. # # Default: 3 * epochsPerDay (approximately 3 days of chain history) # From c4490bbf8de84de58ac8ae83c9cdc7b0212d983e Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 13 Sep 2024 16:38:08 +0400 Subject: [PATCH 39/66] fix go mod --- go.mod | 28 ++++++++++++++-------------- go.sum | 56 ++++++++++++++++++++++++++++---------------------------- 2 files changed, 42 insertions(+), 42 deletions(-) diff --git a/go.mod b/go.mod index 15c6557ec49..d21a5e1e502 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/filecoin-project/go-commp-utils/v2 v2.1.0 github.com/filecoin-project/go-crypto v0.1.0 github.com/filecoin-project/go-f3 v0.2.0 - github.com/filecoin-project/go-fil-commcid v0.1.0 + github.com/filecoin-project/go-fil-commcid v0.2.0 github.com/filecoin-project/go-hamt-ipld/v3 v3.4.0 github.com/filecoin-project/go-jsonrpc v0.6.0 github.com/filecoin-project/go-padreader v0.0.1 @@ -113,7 +113,7 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/manifoldco/promptui v0.9.0 github.com/mattn/go-isatty v0.0.20 - github.com/mattn/go-sqlite3 v1.14.22 + github.com/mattn/go-sqlite3 v1.14.16 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.12.4 @@ -131,7 +131,7 @@ require ( github.com/sirupsen/logrus v1.9.2 github.com/stretchr/testify v1.9.0 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed + github.com/triplewz/poseidon v0.0.1 github.com/urfave/cli/v2 v2.25.5 github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba github.com/whyrusleeping/cbor-gen v0.1.2 @@ -153,15 +153,15 @@ require ( go.uber.org/fx v1.22.1 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.25.0 - golang.org/x/mod v0.17.0 - golang.org/x/net v0.26.0 - golang.org/x/sync v0.7.0 - golang.org/x/sys v0.23.0 - golang.org/x/term v0.22.0 - golang.org/x/time v0.5.0 - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d - golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 + golang.org/x/crypto v0.27.0 + golang.org/x/mod v0.20.0 + golang.org/x/net v0.29.0 + golang.org/x/sync v0.8.0 + golang.org/x/sys v0.25.0 + golang.org/x/term v0.24.0 + golang.org/x/time v0.6.0 + golang.org/x/tools v0.24.0 + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible ) @@ -329,8 +329,8 @@ require ( go.uber.org/dig v1.17.1 // 
indirect go.uber.org/mock v0.4.0 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e // indirect + golang.org/x/text v0.18.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect google.golang.org/grpc v1.64.0 // indirect diff --git a/go.sum b/go.sum index 193643a51b6..9dcc39b5d3a 100644 --- a/go.sum +++ b/go.sum @@ -270,8 +270,8 @@ github.com/filecoin-project/go-crypto v0.1.0 h1:Pob2MphoipMbe/ksxZOMcQvmBHAd3sI/ github.com/filecoin-project/go-crypto v0.1.0/go.mod h1:K9UFXvvoyAVvB+0Le7oGlKiT9mgA5FHOJdYQXEE8IhI= github.com/filecoin-project/go-f3 v0.2.0 h1:Gis44+hOrDjSUEw3IDmU7CudNILi5e+bb1pgZgp680k= github.com/filecoin-project/go-f3 v0.2.0/go.mod h1:43fBLX0iX0+Nnw4Z91wSrdfDYAd6YEDexy7GcLnIJtk= -github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= -github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-commcid v0.2.0 h1:B+5UX8XGgdg/XsdUpST4pEBviKkFOw+Fvl2bLhSKGpI= +github.com/filecoin-project/go-fil-commcid v0.2.0/go.mod h1:8yigf3JDIil+/WpqR5zoKyP0jBPCOGtEqq/K1CcMy9Q= github.com/filecoin-project/go-fil-commp-hashhash v0.2.0 h1:HYIUugzjq78YvV3vC6rL95+SfC/aSTVSnZSZiDV5pCk= github.com/filecoin-project/go-fil-commp-hashhash v0.2.0/go.mod h1:VH3fAFOru4yyWar4626IoS5+VGE8SfZiBODJLUigEo4= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= @@ -908,8 +908,8 @@ github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -1258,8 +1258,8 @@ github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JT github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed h1:C8H2ql+vCBhEi7d3vMBBbdCAKv9s/thfPyLEuSvFpMU= -github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed/go.mod h1:QYG1d0B4YZD7TgF6qZndTTu4rxUGFCCZAQRDanDj+9c= +github.com/triplewz/poseidon v0.0.1 h1:G5bdkTzb9R5K5Dd3DIzBCp7rAErP1zWH0LW7Ip6bxIA= +github.com/triplewz/poseidon v0.0.1/go.mod h1:QYG1d0B4YZD7TgF6qZndTTu4rxUGFCCZAQRDanDj+9c= github.com/uber/jaeger-client-go v2.30.0+incompatible 
h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= @@ -1439,8 +1439,8 @@ golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98y golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1451,8 +1451,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk= +golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1477,8 +1477,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1539,8 +1539,8 @@ golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod 
h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1565,8 +1565,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1658,8 +1658,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1673,8 +1673,8 @@ golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1688,14 +1688,14 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1751,14 +1751,14 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk= -golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors 
v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= From 93a8b7668ac57464643b5beb888052a7cf66b164 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 13 Sep 2024 16:48:51 +0400 Subject: [PATCH 40/66] fix formatting --- cmd/lotus-bench/import.go | 4 ++-- cmd/lotus-shed/balances.go | 3 +-- cmd/lotus-shed/invariants.go | 3 +-- cmd/lotus-shed/state-stats.go | 3 +-- conformance/driver.go | 3 +-- node/impl/full/eth.go | 8 ++++---- 6 files changed, 10 insertions(+), 14 deletions(-) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index f1e2019ed7f..46f2411bf86 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -228,8 +228,8 @@ var importBenchCmd = &cli.Command{ defer cs.Close() //nolint:errcheck // TODO: We need to supply the actual beacon after v14 - stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), - filcns.DefaultUpgradeSchedule(), nil, metadataDs, nil) + stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, + metadataDs, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go index ab2bba5ff9b..0c9cb939d5d 100644 --- a/cmd/lotus-shed/balances.go +++ b/cmd/lotus-shed/balances.go @@ -513,8 +513,7 @@ var chainBalanceStateCmd = &cli.Command{ cst := cbor.NewCborStore(bs) store := adt.WrapStore(ctx, cst) - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, - nil) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/invariants.go b/cmd/lotus-shed/invariants.go index 8455cb917b5..3c754b0ac00 100644 --- a/cmd/lotus-shed/invariants.go +++ b/cmd/lotus-shed/invariants.go @@ -126,8 +126,7 @@ var invariantsCmd = &cli.Command{ cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) defer cs.Close() //nolint:errcheck - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), - vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/state-stats.go b/cmd/lotus-shed/state-stats.go index 4666ad02d3a..e5145e5c178 100644 --- a/cmd/lotus-shed/state-stats.go +++ b/cmd/lotus-shed/state-stats.go @@ -258,8 +258,7 @@ func loadChainStore(ctx context.Context, repoPath string) (*StoreHandle, error) } tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc) - sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, - nil) + sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) if err != nil { return nil, fmt.Errorf("failed to open state manager: %w", err) } diff --git 
a/conformance/driver.go b/conformance/driver.go index f57f2f11fb7..15ae567063a 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -109,8 +109,7 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params cs = store.NewChainStore(bs, bs, ds, filcns.Weight, nil) tse = consensus.NewTipSetExecutor(filcns.RewardFunc) - sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil, ds, - nil) + sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil, ds, nil) ) if err != nil { return nil, err diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index 8a989058725..c1ba0ed6d7d 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -483,7 +483,7 @@ func (a *EthModule) EthGetTransactionCount(ctx context.Context, sender ethtypes. // First, handle the case where the "sender" is an EVM actor. if actor, err := a.StateManager.LoadActor(ctx, addr, ts); err != nil { - if xerrors.Is(err, types.ErrActorNotFound) { + if errors.Is(err, types.ErrActorNotFound) { return 0, nil } return 0, xerrors.Errorf("failed to lookup contract %s: %w", sender, err) @@ -581,7 +581,7 @@ func (a *EthModule) EthGetCode(ctx context.Context, ethAddr ethtypes.EthAddress, actor, err := a.StateManager.LoadActor(ctx, to, ts) if err != nil { - if xerrors.Is(err, types.ErrActorNotFound) { + if errors.Is(err, types.ErrActorNotFound) { return nil, nil } return nil, xerrors.Errorf("failed to lookup contract %s: %w", ethAddr, err) @@ -674,7 +674,7 @@ func (a *EthModule) EthGetStorageAt(ctx context.Context, ethAddr ethtypes.EthAdd actor, err := a.StateManager.LoadActor(ctx, to, ts) if err != nil { - if xerrors.Is(err, types.ErrActorNotFound) { + if errors.Is(err, types.ErrActorNotFound) { return ethtypes.EthBytes(make([]byte, 32)), nil } return nil, xerrors.Errorf("failed to lookup contract %s: %w", ethAddr, err) @@ -755,7 +755,7 @@ func (a *EthModule) EthGetBalance(ctx context.Context, address ethtypes.EthAddre } actor, err := a.StateManager.LoadActorRaw(ctx, filAddr, st) - if xerrors.Is(err, types.ErrActorNotFound) { + if errors.Is(err, types.ErrActorNotFound) { return ethtypes.EthBigIntZero, nil } else if err != nil { return ethtypes.EthBigInt{}, err From 6f8530e352a1943211e233863ef861c686975bff Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 13 Sep 2024 17:04:34 +0400 Subject: [PATCH 41/66] revert config changes --- documentation/en/default-lotus-config.toml | 32 ++++++++-------------- node/config/def.go | 6 ++-- node/modules/chainindex.go | 12 ++++---- 3 files changed, 21 insertions(+), 29 deletions(-) diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 9833a915edb..de24e397216 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -226,7 +226,7 @@ # # type: bool # env var: LOTUS_FEVM_ENABLEETHRPC - #EnableEthRPC = true + #EnableEthRPC = false # EthTraceFilterMaxResults sets the maximum results returned per request by trace_filter # @@ -260,7 +260,7 @@ # # type: bool # env var: LOTUS_EVENTS_ENABLEACTOREVENTSAPI - #EnableActorEventsAPI = true + #EnableActorEventsAPI = false # FilterTTL specifies the time to live for actor event filters. Filters that haven't been accessed longer than # this time become eligible for automatic deletion. 
Filters consume resources, so if they are unused they @@ -306,7 +306,7 @@ # # type: bool # env var: LOTUS_CHAININDEXER_DISABLEINDEXER - #DisableIndexer = false + #DisableIndexer = true # GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. # The garbage collection (GC) process removes data older than this retention period. @@ -318,23 +318,20 @@ # env var: LOTUS_CHAININDEXER_GCRETENTIONEPOCHS #GCRetentionEpochs = 0 - # ReconcileEmptyIndex determines whether to reconcile the index with historic - # chain state during startup when the index is empty. This is limited by the - # amount of chain state available, and the MaxReconcileTipsets option. + # ReconcileEmptyIndex determines whether to reconcile the index with the chain state + # during startup when the index is empty. # - # When set to true: historic indexing is desirable and the index is empty. - # On startup, the indexer will index the available chain state on the node, - # albeit within the MaxReconcileTipsets limit and the available chain state. + # When set to true: + # - On startup, if the index is empty, the indexer will index the available + # chain state on the node albeit within the MaxReconcileTipsets limit. # - # When set to false: historic indexing is not needed. The indexer will not - # automatically re-index historic chain state on startup if the index is - # empty, but will instead only index from the point that indexing is turned - # on. + # When set to false: + # - The indexer will not automatically re-index the chain state on startup if the index is empty. # # Default: false # - # Note: The number of tipsets reconciled (i.e. indexed) during this process - # can be controlled using the MaxReconcileTipsets option. + # Note: The number of tipsets reconciled (i.e. indexed) during this process can be + # controlled using the MaxReconcileTipsets option. # # type: bool # env var: LOTUS_CHAININDEXER_RECONCILEEMPTYINDEX @@ -342,11 +339,6 @@ # MaxReconcileTipsets limits the number of tipsets to reconcile with the chain during startup. # It represents the maximum number of tipsets to index from the chain state that are absent in the index. - # - # Reconciliation generally only involves checking the latest tipset, but in - # the case of an empty index, or an index that has not been actively - # maintained (such as indexing being turned off for a period). This option - # limits the number of historic tipsets to reconcile with the chain state. 
# # Default: 3 * epochsPerDay (approximately 3 days of chain history) # diff --git a/node/config/def.go b/node/config/def.go index 7cf8a29e7a5..5cb69358336 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -83,20 +83,20 @@ func DefaultFullNode() *FullNode { }, }, Fevm: FevmConfig{ - EnableEthRPC: true, + EnableEthRPC: false, EthTraceFilterMaxResults: 500, EthBlkCacheSize: 500, }, Events: EventsConfig{ DisableRealTimeFilterAPI: false, - EnableActorEventsAPI: true, + EnableActorEventsAPI: false, FilterTTL: Duration(time.Hour * 1), MaxFilters: 100, MaxFilterResults: 10000, MaxFilterHeightRange: 2880, // conservative limit of one day }, ChainIndexer: ChainIndexerConfig{ - DisableIndexer: false, + DisableIndexer: true, GCRetentionEpochs: 0, ReconcileEmptyIndex: false, MaxReconcileTipsets: 3 * builtin.EpochsInDay, diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index dd26e303bb0..3af12d9d9fd 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -70,6 +70,12 @@ func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer chainind return *actor.DelegatedAddress, true }) + ch, err := mp.Updates(ctx) + if err != nil { + return err + } + go chainindex.WaitForMpoolUpdates(ctx, ch, indexer) + ev, err := events.NewEvents(ctx, &evapi) if err != nil { return err @@ -89,12 +95,6 @@ func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer chainind log.Infof("Chain indexer reconciled with chain state; observer will start upates from height: %d", head.Height()) unlockObserver() - ch, err := mp.Updates(ctx) - if err != nil { - return err - } - go chainindex.WaitForMpoolUpdates(ctx, ch, indexer) - if err := indexer.Start(); err != nil { return err } From 627aff2e31ce855934b57bf04567b5e66bbed841 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 13 Sep 2024 17:14:20 +0400 Subject: [PATCH 42/66] address changes in observer --- chain/events/observer.go | 16 ++++++---------- itests/kit/node_opts.go | 1 + 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/chain/events/observer.go b/chain/events/observer.go index 592585b04ed..896440eacbc 100644 --- a/chain/events/observer.go +++ b/chain/events/observer.go @@ -157,13 +157,14 @@ func (o *observer) applyChanges(ctx context.Context, changes []*api.HeadChange) } func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) error { + o.lk.Lock() + defer o.lk.Unlock() + ctx, span := trace.StartSpan(ctx, "events.HeadChange") span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev)))) span.AddAttributes(trace.Int64Attribute("applies", int64(len(app)))) - o.lk.Lock() head := o.head - o.lk.Unlock() defer func() { span.AddAttributes(trace.Int64Attribute("endHeight", int64(head.Height()))) @@ -199,12 +200,10 @@ func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) err // 1. We need to get the observers every time in case some registered/deregistered. // 2. We need to atomically set the head so new observers don't see events twice or // skip them. 
- o.lk.Lock() - observers := o.observers + o.head = to - o.lk.Unlock() - for _, obs := range observers { + for _, obs := range o.observers { if err := obs.Revert(ctx, from, to); err != nil { log.Errorf("observer %T failed to revert tipset %s (%d) with: %s", obs, from.Key(), from.Height(), err) } @@ -225,12 +224,9 @@ func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) err ) } - o.lk.Lock() - observers := o.observers o.head = to - o.lk.Unlock() - for _, obs := range observers { + for _, obs := range o.observers { if err := obs.Apply(ctx, head, to); err != nil { log.Errorf("observer %T failed to apply tipset %s (%d) with: %s", obs, to.Key(), to.Height(), err) } diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go index 400f2bc8931..54fe12442d9 100644 --- a/itests/kit/node_opts.go +++ b/itests/kit/node_opts.go @@ -65,6 +65,7 @@ var DefaultNodeOpts = nodeOpts{ // test defaults cfg.Fevm.EnableEthRPC = true + cfg.ChainIndexer.DisableIndexer = false cfg.Events.MaxFilterHeightRange = math.MaxInt64 cfg.Events.EnableActorEventsAPI = true cfg.ChainIndexer.ReconcileEmptyIndex = true From 7244b667b7de49898818e652fc2fbf89981eb599 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 13 Sep 2024 17:41:19 +0400 Subject: [PATCH 43/66] remove top level chainindex package --- chain/events/filter/event.go | 16 ++++++++-------- chain/events/filter/event_test.go | 8 ++++---- {chainindex => chain/index}/ddls.go | 2 +- {chainindex => chain/index}/events.go | 2 +- {chainindex => chain/index}/gc.go | 2 +- {chainindex => chain/index}/helpers.go | 2 +- {chainindex => chain/index}/indexer.go | 2 +- {chainindex => chain/index}/interface.go | 2 +- {chainindex => chain/index}/pub_sub.go | 2 +- {chainindex => chain/index}/read.go | 2 +- {chainindex => chain/index}/reconcile.go | 2 +- chain/stmgr/searchwait.go | 6 +++--- chain/stmgr/stmgr.go | 8 ++++---- cmd/lotus/daemon.go | 4 ++-- node/builder_chain.go | 4 ++-- node/impl/full/actor_events.go | 4 ++-- node/impl/full/actor_events_test.go | 22 +++++++++++----------- node/impl/full/eth.go | 20 ++++++++++---------- node/impl/full/eth_events.go | 12 ++++++------ node/modules/actorevent.go | 6 +++--- node/modules/chainindex.go | 12 ++++++------ node/modules/ethmodule.go | 6 +++--- node/modules/stmgr.go | 4 ++-- 23 files changed, 75 insertions(+), 75 deletions(-) rename {chainindex => chain/index}/ddls.go (99%) rename {chainindex => chain/index}/events.go (99%) rename {chainindex => chain/index}/gc.go (99%) rename {chainindex => chain/index}/helpers.go (99%) rename {chainindex => chain/index}/indexer.go (99%) rename {chainindex => chain/index}/interface.go (99%) rename {chainindex => chain/index}/pub_sub.go (98%) rename {chainindex => chain/index}/read.go (99%) rename {chainindex => chain/index}/reconcile.go (99%) diff --git a/chain/events/filter/event.go b/chain/events/filter/event.go index f5d7ac8f106..277b941e80c 100644 --- a/chain/events/filter/event.go +++ b/chain/events/filter/event.go @@ -16,9 +16,9 @@ import ( "github.com/filecoin-project/go-state-types/abi" blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/lotus/chain/index" cstore "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chainindex" ) func isIndexedValue(b uint8) bool { @@ -33,7 +33,7 @@ type AddressResolver func(context.Context, abi.ActorID, *types.TipSet) (address. 
type EventFilter interface { Filter - TakeCollectedEvents(context.Context) []*chainindex.CollectedEvent + TakeCollectedEvents(context.Context) []*index.CollectedEvent CollectEvents(context.Context, *TipSetEvents, bool, AddressResolver) error } @@ -48,7 +48,7 @@ type eventFilter struct { maxResults int // maximum number of results to collect, 0 is unlimited mu sync.Mutex - collected []*chainindex.CollectedEvent + collected []*index.CollectedEvent lastTaken time.Time ch chan<- interface{} } @@ -109,7 +109,7 @@ func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever } // event matches filter, so record it - cev := &chainindex.CollectedEvent{ + cev := &index.CollectedEvent{ Entries: ev.Entries, EmitterAddr: addr, EventIdx: eventCount, @@ -141,13 +141,13 @@ func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever return nil } -func (f *eventFilter) setCollectedEvents(ces []*chainindex.CollectedEvent) { +func (f *eventFilter) setCollectedEvents(ces []*index.CollectedEvent) { f.mu.Lock() f.collected = ces f.mu.Unlock() } -func (f *eventFilter) TakeCollectedEvents(ctx context.Context) []*chainindex.CollectedEvent { +func (f *eventFilter) TakeCollectedEvents(ctx context.Context) []*index.CollectedEvent { f.mu.Lock() collected := f.collected f.collected = nil @@ -297,7 +297,7 @@ type EventFilterManager struct { ChainStore *cstore.ChainStore AddressResolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) MaxFilterResults int - ChainIndexer chainindex.Indexer + ChainIndexer index.Indexer mu sync.Mutex // guards mutations to filters filters map[types.FilterID]EventFilter @@ -384,7 +384,7 @@ func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight a } if m.ChainIndexer != nil && minHeight != -1 && minHeight < currentHeight { - ef := &chainindex.EventFilter{ + ef := &index.EventFilter{ MinHeight: minHeight, MaxHeight: maxHeight, TipsetCid: tipsetCid, diff --git a/chain/events/filter/event_test.go b/chain/events/filter/event_test.go index 7626be059f4..5ffb678c65e 100644 --- a/chain/events/filter/event_test.go +++ b/chain/events/filter/event_test.go @@ -19,8 +19,8 @@ import ( "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chainindex" ) func keysToKeysWithCodec(keys map[string][][]byte) map[string][]types.ActorEventBlock { @@ -71,8 +71,8 @@ func TestEventFilterCollectEvents(t *testing.T) { cid14000, err := events14000.msgTs.Key().Cid() require.NoError(t, err, "tipset cid") - noCollectedEvents := []*chainindex.CollectedEvent{} - oneCollectedEvent := []*chainindex.CollectedEvent{ + noCollectedEvents := []*index.CollectedEvent{} + oneCollectedEvent := []*index.CollectedEvent{ { Entries: ev1.Entries, EmitterAddr: a1, @@ -89,7 +89,7 @@ func TestEventFilterCollectEvents(t *testing.T) { name string filter *eventFilter te *TipSetEvents - want []*chainindex.CollectedEvent + want []*index.CollectedEvent }{ { name: "nomatch tipset min height", diff --git a/chainindex/ddls.go b/chain/index/ddls.go similarity index 99% rename from chainindex/ddls.go rename to chain/index/ddls.go index 4937972e3f3..14505b03904 100644 --- a/chainindex/ddls.go +++ b/chain/index/ddls.go @@ -1,4 +1,4 @@ -package chainindex +package index const DefaultDbFilename = "chainindex.db" diff --git a/chainindex/events.go b/chain/index/events.go similarity 
index 99% rename from chainindex/events.go rename to chain/index/events.go index 079c2470787..77274c48e46 100644 --- a/chainindex/events.go +++ b/chain/index/events.go @@ -1,4 +1,4 @@ -package chainindex +package index import ( "bytes" diff --git a/chainindex/gc.go b/chain/index/gc.go similarity index 99% rename from chainindex/gc.go rename to chain/index/gc.go index f82c151b450..983c12eafd1 100644 --- a/chainindex/gc.go +++ b/chain/index/gc.go @@ -1,4 +1,4 @@ -package chainindex +package index import ( "context" diff --git a/chainindex/helpers.go b/chain/index/helpers.go similarity index 99% rename from chainindex/helpers.go rename to chain/index/helpers.go index c4801af85ef..54b6300ed9a 100644 --- a/chainindex/helpers.go +++ b/chain/index/helpers.go @@ -1,4 +1,4 @@ -package chainindex +package index import ( "context" diff --git a/chainindex/indexer.go b/chain/index/indexer.go similarity index 99% rename from chainindex/indexer.go rename to chain/index/indexer.go index a936b49a30a..7d6537c38f7 100644 --- a/chainindex/indexer.go +++ b/chain/index/indexer.go @@ -1,4 +1,4 @@ -package chainindex +package index import ( "context" diff --git a/chainindex/interface.go b/chain/index/interface.go similarity index 99% rename from chainindex/interface.go rename to chain/index/interface.go index f54036be209..8e695d95d36 100644 --- a/chainindex/interface.go +++ b/chain/index/interface.go @@ -1,4 +1,4 @@ -package chainindex +package index import ( "context" diff --git a/chainindex/pub_sub.go b/chain/index/pub_sub.go similarity index 98% rename from chainindex/pub_sub.go rename to chain/index/pub_sub.go index b24b154e4b9..a8dd8d05b7b 100644 --- a/chainindex/pub_sub.go +++ b/chain/index/pub_sub.go @@ -1,4 +1,4 @@ -package chainindex +package index import "context" diff --git a/chainindex/read.go b/chain/index/read.go similarity index 99% rename from chainindex/read.go rename to chain/index/read.go index 8aefb6d6b08..ed66f7e04ff 100644 --- a/chainindex/read.go +++ b/chain/index/read.go @@ -1,4 +1,4 @@ -package chainindex +package index import ( "context" diff --git a/chainindex/reconcile.go b/chain/index/reconcile.go similarity index 99% rename from chainindex/reconcile.go rename to chain/index/reconcile.go index 8e14ba081c4..1223cc053bb 100644 --- a/chainindex/reconcile.go +++ b/chain/index/reconcile.go @@ -1,4 +1,4 @@ -package chainindex +package index import ( "context" diff --git a/chain/stmgr/searchwait.go b/chain/stmgr/searchwait.go index 420c680b2d3..d0db314a20d 100644 --- a/chain/stmgr/searchwait.go +++ b/chain/stmgr/searchwait.go @@ -10,9 +10,9 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chainindex" ) // WaitForMessage blocks until a message appears on chain. 
It looks backwards in the chain to see if this has already @@ -168,7 +168,7 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet log.Debugf("message %s not found", mcid) } - case errors.Is(err, chainindex.ErrNotFound): + case errors.Is(err, index.ErrNotFound): // ok for the index to have incomplete data default: @@ -191,7 +191,7 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet func (sm *StateManager) searchForIndexedMsg(ctx context.Context, mcid cid.Cid, m types.ChainMsg) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { if sm.chainIndexer == nil { - return nil, nil, cid.Undef, chainindex.ErrNotFound + return nil, nil, cid.Undef, index.ErrNotFound } minfo, err := sm.chainIndexer.GetMsgInfo(ctx, mcid) if err != nil { diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index cd376f6d573..9819351ed74 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -28,12 +28,12 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/beacon" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chainindex" // Used for genesis. msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" @@ -156,7 +156,7 @@ type StateManager struct { tsExecMonitor ExecMonitor beacon beacon.Schedule - chainIndexer chainindex.Indexer + chainIndexer index.Indexer // We keep a small cache for calls to ExecutionTrace which helps improve // performance for node operators like exchanges and block explorers @@ -178,7 +178,7 @@ type tipSetCacheEntry struct { } func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, - metadataDs dstore.Batching, chainIndexer chainindex.Indexer) (*StateManager, error) { + metadataDs dstore.Batching, chainIndexer index.Indexer) (*StateManager, error) { // If we have upgrades, make sure they're in-order and make sense. 
if err := us.Validate(); err != nil { return nil, err @@ -248,7 +248,7 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, }, nil } -func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching, chainIndexer chainindex.Indexer) (*StateManager, error) { +func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching, chainIndexer index.Indexer) (*StateManager, error) { sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs, chainIndexer) if err != nil { return nil, err diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 5a21d2258e0..eb33ac731a5 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -36,12 +36,12 @@ import ( "github.com/filecoin-project/lotus/chain/beacon/drand" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chainindex" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/journal/fsjournal" @@ -636,7 +636,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) } log.Info("populating chain index...") - if err := chainindex.PopulateFromSnapshot(ctx, filepath.Join(basePath, chainindex.DefaultDbFilename), cst); err != nil { + if err := index.PopulateFromSnapshot(ctx, filepath.Join(basePath, index.DefaultDbFilename), cst); err != nil { return err } log.Info("populating chain index done") diff --git a/node/builder_chain.go b/node/builder_chain.go index 2f0be6d503f..490bfefd703 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -21,6 +21,7 @@ import ( "github.com/filecoin-project/lotus/chain/events/filter" "github.com/filecoin-project/lotus/chain/exchange" "github.com/filecoin-project/lotus/chain/gen/slashfilter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/lf3" "github.com/filecoin-project/lotus/chain/market" "github.com/filecoin-project/lotus/chain/messagepool" @@ -34,7 +35,6 @@ import ( "github.com/filecoin-project/lotus/chain/wallet" ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" "github.com/filecoin-project/lotus/chain/wallet/remotewallet" - "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/lib/peermgr" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/hello" @@ -282,7 +282,7 @@ func ConfigFullNode(c interface{}) Option { Override(ConsensusReporterKey, modules.RunConsensusFaultReporter(cfg.FaultReporter)), ), - Override(new(chainindex.Indexer), modules.ChainIndexer(cfg.ChainIndexer)), + Override(new(index.Indexer), modules.ChainIndexer(cfg.ChainIndexer)), If(!cfg.ChainIndexer.DisableIndexer, Override(InitChainIndexerKey, modules.InitChainIndexer), ), diff --git a/node/impl/full/actor_events.go b/node/impl/full/actor_events.go index aac86ff7434..a865fb81b6b 100644 --- a/node/impl/full/actor_events.go +++ 
b/node/impl/full/actor_events.go @@ -14,8 +14,8 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chainindex" ) type ActorEventAPI interface { @@ -284,7 +284,7 @@ func (a *ActorEventHandler) SubscribeActorEventsRaw(ctx context.Context, evtFilt nextBacklogHeightUpdate := a.clock.Now().Add(a.blockDelay) collectEvent := func(ev interface{}) bool { - ce, ok := ev.(*chainindex.CollectedEvent) + ce, ok := ev.(*index.CollectedEvent) if !ok { log.Errorf("got unexpected value from event filter: %T", ev) return false diff --git a/node/impl/full/actor_events_test.go b/node/impl/full/actor_events_test.go index 92fdbeaa87b..6a81ad6bbe2 100644 --- a/node/impl/full/actor_events_test.go +++ b/node/impl/full/actor_events_test.go @@ -19,8 +19,8 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chainindex" ) var testCid = cid.MustParse("bafyreicmaj5hhoy5mgqvamfhgexxyergw7hdeshizghodwkjg6qmpoco7i") @@ -332,7 +332,7 @@ func TestSubscribeActorEventsRaw(t *testing.T) { req.NoError(err) mockChain.setHeaviestTipSet(ts) - var eventsThisEpoch []*chainindex.CollectedEvent + var eventsThisEpoch []*index.CollectedEvent if thisHeight <= finishHeight { eventsThisEpoch = allEvents[(thisHeight-filterStartHeight)*eventsPerEpoch : (thisHeight-filterStartHeight+2)*eventsPerEpoch] } @@ -530,13 +530,13 @@ type mockFilter struct { id types.FilterID lastTaken time.Time ch chan<- interface{} - historicalEvents []*chainindex.CollectedEvent + historicalEvents []*index.CollectedEvent subChannelCalls int clearSubChannelCalls int lk sync.Mutex } -func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historicalEvents []*chainindex.CollectedEvent) *mockFilter { +func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historicalEvents []*index.CollectedEvent) *mockFilter { t.Helper() var id [32]byte _, err := rng.Read(id[:]) @@ -549,7 +549,7 @@ func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historic } } -func (m *mockFilter) sendEventToChannel(e *chainindex.CollectedEvent) { +func (m *mockFilter) sendEventToChannel(e *index.CollectedEvent) { m.lk.Lock() defer m.lk.Unlock() if m.ch != nil { @@ -603,7 +603,7 @@ func (m *mockFilter) ClearSubChannel() { m.ch = nil } -func (m *mockFilter) TakeCollectedEvents(context.Context) []*chainindex.CollectedEvent { +func (m *mockFilter) TakeCollectedEvents(context.Context) []*index.CollectedEvent { e := m.historicalEvents m.historicalEvents = nil m.lastTaken = time.Now() @@ -730,7 +730,7 @@ func epochPtr(i int) *abi.ChainEpoch { return &e } -func collectedToActorEvents(collected []*chainindex.CollectedEvent) []*types.ActorEvent { +func collectedToActorEvents(collected []*index.CollectedEvent) []*types.ActorEvent { var out []*types.ActorEvent for _, c := range collected { out = append(out, &types.ActorEvent{ @@ -745,8 +745,8 @@ func collectedToActorEvents(collected []*chainindex.CollectedEvent) []*types.Act return out } -func makeCollectedEvents(t *testing.T, rng *pseudo.Rand, eventStartHeight, eventsPerHeight, eventEndHeight int64) []*chainindex.CollectedEvent { - var out []*chainindex.CollectedEvent +func makeCollectedEvents(t *testing.T, rng 
*pseudo.Rand, eventStartHeight, eventsPerHeight, eventEndHeight int64) []*index.CollectedEvent { + var out []*index.CollectedEvent for h := eventStartHeight; h <= eventEndHeight; h++ { for i := int64(0); i < eventsPerHeight; i++ { out = append(out, makeCollectedEvent(t, rng, types.NewTipSetKey(mkCid(t, fmt.Sprintf("h=%d", h))), abi.ChainEpoch(h))) @@ -755,11 +755,11 @@ func makeCollectedEvents(t *testing.T, rng *pseudo.Rand, eventStartHeight, event return out } -func makeCollectedEvent(t *testing.T, rng *pseudo.Rand, tsKey types.TipSetKey, height abi.ChainEpoch) *chainindex.CollectedEvent { +func makeCollectedEvent(t *testing.T, rng *pseudo.Rand, tsKey types.TipSetKey, height abi.ChainEpoch) *index.CollectedEvent { addr, err := address.NewIDAddress(uint64(rng.Int63())) require.NoError(t, err) - return &chainindex.CollectedEvent{ + return &index.CollectedEvent{ Entries: []types.EventEntry{ {Flags: 0x01, Key: "k1", Codec: cid.Raw, Value: []byte("v1")}, {Flags: 0x01, Key: "k2", Codec: cid.Raw, Value: []byte("v2")}, diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index c1ba0ed6d7d..519a5b871e4 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -33,12 +33,12 @@ import ( builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" builtinevm "github.com/filecoin-project/lotus/chain/actors/builtin/evm" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" - "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/node/modules/dtypes" ) @@ -137,7 +137,7 @@ type EthModule struct { EthBlkCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] // caches blocks by their CID but blocks only have the transaction hashes EthBlkTxCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] // caches blocks along with full transaction payload by their CID - ChainIndexer chainindex.Indexer + ChainIndexer index.Indexer ChainAPI MpoolAPI @@ -165,7 +165,7 @@ type EthAPI struct { Chain *store.ChainStore StateManager *stmgr.StateManager - ChainIndexer chainindex.Indexer + ChainIndexer index.Indexer MpoolAPI MpoolAPI EthModuleAPI @@ -359,7 +359,7 @@ func (a *EthModule) EthGetTransactionByHashLimited(ctx context.Context, txHash * if a.ChainIndexer != nil { c, err = a.ChainIndexer.GetCidFromHash(ctx, *txHash) - if err != nil && errors.Is(err, chainindex.ErrNotFound) { + if err != nil && errors.Is(err, index.ErrNotFound) { log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) } else if err != nil { log.Errorf("failed to lookup transaction hash %s in chain indexer: %s", txHash.String(), err) @@ -425,7 +425,7 @@ func (a *EthModule) EthGetMessageCidByTransactionHash(ctx context.Context, txHas var err error if a.ChainIndexer != nil { c, err = a.ChainIndexer.GetCidFromHash(ctx, *txHash) - if err != nil && errors.Is(err, chainindex.ErrNotFound) { + if err != nil && errors.Is(err, index.ErrNotFound) { log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) } else if err != nil { log.Errorf("failed to lookup transaction hash %s in chain indexer: %s", txHash.String(), err) @@ -433,7 +433,7 @@ func (a *EthModule) EthGetMessageCidByTransactionHash(ctx context.Context, txHas } } - if errors.Is(err, chainindex.ErrNotFound) { + if 
errors.Is(err, index.ErrNotFound) { log.Debug("could not find transaction hash %s in lookup table", txHash.String()) } else if a.ChainIndexer != nil { return &c, nil @@ -518,7 +518,7 @@ func (a *EthModule) EthGetTransactionReceiptLimited(ctx context.Context, txHash if a.ChainIndexer != nil { c, err = a.ChainIndexer.GetCidFromHash(ctx, txHash) - if err != nil && errors.Is(err, chainindex.ErrNotFound) { + if err != nil && errors.Is(err, index.ErrNotFound) { log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) } else if err != nil { log.Errorf("failed to lookup transaction hash %s in chain indexer: %s", txHash.String(), err) @@ -947,7 +947,7 @@ func (a *EthAPI) EthSendRawTransactionUntrusted(ctx context.Context, rawTx ethty return ethSendRawTransaction(ctx, a.MpoolAPI, a.ChainIndexer, rawTx, true) } -func ethSendRawTransaction(ctx context.Context, mpool MpoolAPI, indexer chainindex.Indexer, rawTx ethtypes.EthBytes, untrusted bool) (ethtypes.EthHash, error) { +func ethSendRawTransaction(ctx context.Context, mpool MpoolAPI, indexer index.Indexer, rawTx ethtypes.EthBytes, untrusted bool) (ethtypes.EthHash, error) { txArgs, err := ethtypes.ParseEthTransaction(rawTx) if err != nil { return ethtypes.EmptyEthHash, err @@ -1595,7 +1595,7 @@ func (e *EthEventHandler) EthGetLogs(ctx context.Context, filterSpec *ethtypes.E return ethFilterResultFromEvents(ctx, ces, e.SubManager.StateAPI) } -func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) ([]*chainindex.CollectedEvent, error) { +func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) ([]*index.CollectedEvent, error) { if e.EventFilterManager == nil { return nil, api.ErrNotSupported } @@ -1625,7 +1625,7 @@ func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, filterSpec return nil, xerrors.New("cannot ask for events for a tipset at or greater than head") } - ef := &chainindex.EventFilter{ + ef := &index.EventFilter{ MinHeight: pf.minHeight, MaxHeight: pf.maxHeight, TipsetCid: pf.tipsetCid, diff --git a/node/impl/full/eth_events.go b/node/impl/full/eth_events.go index 171b5f9e164..850826ecf9c 100644 --- a/node/impl/full/eth_events.go +++ b/node/impl/full/eth_events.go @@ -13,14 +13,14 @@ import ( "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" - "github.com/filecoin-project/lotus/chainindex" ) type filterEventCollector interface { - TakeCollectedEvents(context.Context) []*chainindex.CollectedEvent + TakeCollectedEvents(context.Context) []*index.CollectedEvent } type filterMessageCollector interface { @@ -94,7 +94,7 @@ func ethLogFromEvent(entries []types.EventEntry) (data []byte, topics []ethtypes return data, topics, true } -func ethFilterLogsFromEvents(ctx context.Context, evs []*chainindex.CollectedEvent, sa StateAPI) ([]ethtypes.EthLog, error) { +func ethFilterLogsFromEvents(ctx context.Context, evs []*index.CollectedEvent, sa StateAPI) ([]ethtypes.EthLog, error) { var logs []ethtypes.EthLog for _, ev := range evs { log := ethtypes.EthLog{ @@ -141,7 +141,7 @@ func ethFilterLogsFromEvents(ctx context.Context, evs []*chainindex.CollectedEve return logs, nil } -func ethFilterResultFromEvents(ctx context.Context, evs []*chainindex.CollectedEvent, sa StateAPI) 
(*ethtypes.EthFilterResult, error) { +func ethFilterResultFromEvents(ctx context.Context, evs []*index.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) { logs, err := ethFilterLogsFromEvents(ctx, evs, sa) if err != nil { return nil, err @@ -348,8 +348,8 @@ func (e *ethSubscription) start(ctx context.Context) { return case v := <-e.in: switch vt := v.(type) { - case *chainindex.CollectedEvent: - evs, err := ethFilterResultFromEvents(ctx, []*chainindex.CollectedEvent{vt}, e.StateAPI) + case *index.CollectedEvent: + evs, err := ethFilterResultFromEvents(ctx, []*index.CollectedEvent{vt}, e.StateAPI) if err != nil { continue } diff --git a/node/modules/actorevent.go b/node/modules/actorevent.go index 23c5607b04f..dcec80bdb55 100644 --- a/node/modules/actorevent.go +++ b/node/modules/actorevent.go @@ -12,11 +12,11 @@ import ( "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/modules/helpers" @@ -95,9 +95,9 @@ func EthEventHandler(cfg config.EventsConfig, enableEthRPC bool) func(helpers.Me } func EventFilterManager(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, - *stmgr.StateManager, EventHelperAPI, full.ChainAPI, chainindex.Indexer) (*filter.EventFilterManager, error) { + *stmgr.StateManager, EventHelperAPI, full.ChainAPI, index.Indexer) (*filter.EventFilterManager, error) { return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, - evapi EventHelperAPI, chainapi full.ChainAPI, ci chainindex.Indexer) (*filter.EventFilterManager, error) { + evapi EventHelperAPI, chainapi full.ChainAPI, ci index.Indexer) (*filter.EventFilterManager, error) { ctx := helpers.LifecycleCtx(mctx, lc) // Enable indexing of actor events diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index 3af12d9d9fd..59128c47132 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -11,18 +11,18 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/filecoin-project/lotus/node/repo" ) -func ChainIndexer(cfg config.ChainIndexerConfig) func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (chainindex.Indexer, error) { - return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (chainindex.Indexer, error) { +func ChainIndexer(cfg config.ChainIndexerConfig) func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (index.Indexer, error) { + return func(lc fx.Lifecycle, mctx 
helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (index.Indexer, error) { if cfg.DisableIndexer { log.Infof("ChainIndexer is disabled") return nil, nil @@ -34,7 +34,7 @@ func ChainIndexer(cfg config.ChainIndexerConfig) func(lc fx.Lifecycle, mctx help } // TODO Implement config driven auto-backfilling - chainIndexer, err := chainindex.NewSqliteIndexer(filepath.Join(sqlitePath, chainindex.DefaultDbFilename), + chainIndexer, err := index.NewSqliteIndexer(filepath.Join(sqlitePath, index.DefaultDbFilename), cs, cfg.GCRetentionEpochs, cfg.ReconcileEmptyIndex, cfg.MaxReconcileTipsets) if err != nil { return nil, err @@ -50,7 +50,7 @@ func ChainIndexer(cfg config.ChainIndexerConfig) func(lc fx.Lifecycle, mctx help } } -func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer chainindex.Indexer, +func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer index.Indexer, evapi EventHelperAPI, mp *messagepool.MessagePool, sm *stmgr.StateManager) { ctx := helpers.LifecycleCtx(mctx, lc) @@ -74,7 +74,7 @@ func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer chainind if err != nil { return err } - go chainindex.WaitForMpoolUpdates(ctx, ch, indexer) + go index.WaitForMpoolUpdates(ctx, ch, indexer) ev, err := events.NewEvents(ctx, &evapi) if err != nil { diff --git a/node/modules/ethmodule.go b/node/modules/ethmodule.go index 89bf857ed76..61d957b7fad 100644 --- a/node/modules/ethmodule.go +++ b/node/modules/ethmodule.go @@ -10,11 +10,11 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types/ethtypes" - "github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/modules/helpers" @@ -22,10 +22,10 @@ import ( ) func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, - EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI, full.MpoolAPI, full.SyncAPI, *full.EthEventHandler, chainindex.Indexer) (*full.EthModule, error) { + EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI, full.MpoolAPI, full.SyncAPI, *full.EthEventHandler, index.Indexer) (*full.EthModule, error) { return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI, mpoolapi full.MpoolAPI, syncapi full.SyncAPI, - ethEventHandler *full.EthEventHandler, chainIndexer chainindex.Indexer) (*full.EthModule, error) { + ethEventHandler *full.EthEventHandler, chainIndexer index.Indexer) (*full.EthModule, error) { // prefill the whole skiplist cache maintained internally by the GetTipsetByHeight go func() { diff --git a/node/modules/stmgr.go b/node/modules/stmgr.go index 0518380ea71..d07edba1a2b 100644 --- a/node/modules/stmgr.go +++ b/node/modules/stmgr.go @@ -4,14 +4,14 @@ import ( "go.uber.org/fx" "github.com/filecoin-project/lotus/chain/beacon" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/vm" - 
"github.com/filecoin-project/lotus/chainindex" "github.com/filecoin-project/lotus/node/modules/dtypes" ) -func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule, metadataDs dtypes.MetadataDS, chainIndexer chainindex.Indexer) (*stmgr.StateManager, error) { +func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule, metadataDs dtypes.MetadataDS, chainIndexer index.Indexer) (*stmgr.StateManager, error) { sm, err := stmgr.NewStateManager(cs, exec, sys, us, b, metadataDs, chainIndexer) if err != nil { return nil, err From 531cd3894daeb23dcae8ec2931fa2ebbbfbf7e19 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 13 Sep 2024 18:07:57 +0400 Subject: [PATCH 44/66] changes as per review --- chain/index/events.go | 12 +++++++++++- chain/index/helpers.go | 14 ++++++++++++++ chain/index/read.go | 9 +-------- 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/chain/index/events.go b/chain/index/events.go index 77274c48e46..f842248543f 100644 --- a/chain/index/events.go +++ b/chain/index/events.go @@ -30,6 +30,10 @@ type executedMessage struct { // events are indexed against their inclusion/message tipset when we get the corresponding execution tipset func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *types.TipSet, executionTs *types.TipSet) error { + if si.idToRobustAddrFunc == nil { + return xerrors.Errorf("indexer can not index events without an address resolver") + } + // check if we have an event indexed for any message in the `msgTs` tipset -> if so, there's nothig to do here // this makes event inserts idempotent msgTsKeyCidBytes, err := toTipsetKeyCidBytes(msgTs) @@ -199,6 +203,11 @@ func (si *SqliteIndexer) checkTipsetIndexedStatus(ctx context.Context, f *EventF case f.MinHeight >= 0 && f.MinHeight == f.MaxHeight: tipsetKeyCid, err = si.getTipsetKeyCidByHeight(ctx, f.MinHeight) if err != nil { + if err == ErrNotFound { + // this means that this is a null round and there exist no events for this epoch + return nil + } + return xerrors.Errorf("failed to get tipset key cid by height: %w", err) } default: @@ -232,7 +241,8 @@ func (si *SqliteIndexer) getTipsetKeyCidByHeight(ctx context.Context, height abi } if ts.Height() != height { - return nil, ErrNotFound // No tipset at exact height + // this means that this is a null round + return nil, ErrNotFound } return toTipsetKeyCidBytes(ts) diff --git a/chain/index/helpers.go b/chain/index/helpers.go index 54b6300ed9a..4b17dd219a7 100644 --- a/chain/index/helpers.go +++ b/chain/index/helpers.go @@ -11,6 +11,20 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) +// PopulateFromSnapshot initializes and populates the chain index from a snapshot. +// +// This function creates a new Index at the specified path and populates +// it by using the chain state from the provided ChainStore. It starts from the heaviest +// tipset and works backwards, indexing each tipset until it reaches the genesis +// block or encounters a tipset for which it is unable to find messages in the chain store. +// +// Important Notes: +// 1. This function assumes that the snapshot has already been imported into the ChainStore. +// 2. Events are not populated in the index because snapshots do not contain event data, +// and messages are not re-executed during this process. The resulting index will +// only contain tipsets and messages. +// 3. 
This function will delete any existing database at the specified path before +// creating a new one. func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error { log.Infof("populating chainindex at path %s from snapshot", path) // Check if a database already exists and attempt to delete it diff --git a/chain/index/read.go b/chain/index/read.go index ed66f7e04ff..5019433fcad 100644 --- a/chain/index/read.go +++ b/chain/index/read.go @@ -90,7 +90,7 @@ func (si *SqliteIndexer) readWithHeadIndexWait(ctx context.Context, readFunc fun if err == sql.ErrNoRows { return ErrNotFound } - return xerrors.Errorf("failed to get message info: %w", err) + return xerrors.Errorf("failed to read data from index: %w", err) } return nil @@ -110,13 +110,6 @@ func (si *SqliteIndexer) waitTillHeadIndexed(ctx context.Context) error { return xerrors.Errorf("failed to get tipset key cid: %w", err) } - // is it already indexed? - if exists, err := si.isTipsetIndexed(ctx, headTsKeyCidBytes); err != nil { - return xerrors.Errorf("failed to check if tipset exists: %w", err) - } else if exists { - return nil - } - // wait till it is indexed subCh, unsubFn := si.subscribeUpdates() defer unsubFn() From 77fc462921ba8422c6a48c48c0a7e82024a11ab2 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 13 Sep 2024 18:41:23 +0400 Subject: [PATCH 45/66] changes as per review --- chain/index/gc.go | 7 +++++++ chain/index/helpers.go | 9 +++++++-- chain/index/indexer.go | 2 +- lib/sqlite/sqlite.go | 17 ++++++++--------- lib/sqlite/sqlite_test.go | 13 +++++-------- 5 files changed, 28 insertions(+), 20 deletions(-) diff --git a/chain/index/gc.go b/chain/index/gc.go index 983c12eafd1..648501a2b90 100644 --- a/chain/index/gc.go +++ b/chain/index/gc.go @@ -25,6 +25,13 @@ func (si *SqliteIndexer) gcLoop() { defer cleanupTicker.Stop() for si.ctx.Err() == nil { + si.closeLk.RLock() + if si.closed { + si.closeLk.RUnlock() + return + } + si.closeLk.RUnlock() + select { case <-cleanupTicker.C: si.gc(si.ctx) diff --git a/chain/index/helpers.go b/chain/index/helpers.go index 4b17dd219a7..eab54256589 100644 --- a/chain/index/helpers.go +++ b/chain/index/helpers.go @@ -5,6 +5,7 @@ import ( "database/sql" "os" + ipld "github.com/ipfs/go-ipld-format" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/api" @@ -54,8 +55,12 @@ func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error for curTs != nil { if err := si.indexTipset(ctx, tx, curTs); err != nil { - log.Infof("stopping chainindex population at height %d with final error: %s", curTs.Height(), err) - break + if ipld.IsNotFound(err) { + log.Infof("stopping chainindex population at height %d as snapshot only contains data upto this height; error is %s", curTs.Height(), err) + break + } + + return xerrors.Errorf("failed to populate chainindex from snapshot at height %d: %w", curTs.Height(), err) } totalIndexed++ diff --git a/chain/index/indexer.go b/chain/index/indexer.go index 7d6537c38f7..634c7ac2c8d 100644 --- a/chain/index/indexer.go +++ b/chain/index/indexer.go @@ -66,7 +66,7 @@ type SqliteIndexer struct { func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64, reconcileEmptyIndex bool, maxReconcileTipsets int) (si *SqliteIndexer, err error) { - db, _, err := sqlite.Open(path) + db, err := sqlite.Open(path) if err != nil { return nil, xerrors.Errorf("failed to setup message index db: %w", err) } diff --git a/lib/sqlite/sqlite.go b/lib/sqlite/sqlite.go index 039be43233d..15c965419d2 100644 --- a/lib/sqlite/sqlite.go +++ 
b/lib/sqlite/sqlite.go @@ -47,40 +47,39 @@ func metaDdl(version uint64) []string { } // Open opens a database at the given path. If the database does not exist, it will be created. -func Open(path string) (*sql.DB, bool, error) { +func Open(path string) (*sql.DB, error) { if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return nil, false, xerrors.Errorf("error creating database base directory [@ %s]: %w", path, err) + return nil, xerrors.Errorf("error creating database base directory [@ %s]: %w", path, err) } _, err := os.Stat(path) if err != nil && !errors.Is(err, fs.ErrNotExist) { - return nil, false, xerrors.Errorf("error checking file status for database [@ %s]: %w", path, err) + return nil, xerrors.Errorf("error checking file status for database [@ %s]: %w", path, err) } - exists := err == nil db, err := sql.Open("sqlite3", path+"?mode=rwc") if err != nil { - return nil, false, xerrors.Errorf("error opening database [@ %s]: %w", path, err) + return nil, xerrors.Errorf("error opening database [@ %s]: %w", path, err) } for _, pragma := range pragmas { if _, err := db.Exec(pragma); err != nil { _ = db.Close() - return nil, false, xerrors.Errorf("error setting database pragma %q: %w", pragma, err) + return nil, xerrors.Errorf("error setting database pragma %q: %w", pragma, err) } } var foreignKeysEnabled int if err := db.QueryRow("PRAGMA foreign_keys;").Scan(&foreignKeysEnabled); err != nil { - return nil, false, xerrors.Errorf("failed to check foreign keys setting: %w", err) + return nil, xerrors.Errorf("failed to check foreign keys setting: %w", err) } if foreignKeysEnabled == 0 { - return nil, false, xerrors.Errorf("foreign keys are not enabled for database [@ %s]", path) + return nil, xerrors.Errorf("foreign keys are not enabled for database [@ %s]", path) } log.Infof("Database [@ %s] opened successfully with foreign keys enabled", path) - return db, exists, nil + return db, nil } // InitDb initializes the database by checking whether it needs to be created or upgraded. 
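Illustrative sketch (not part of the patch series): with the `exists` flag dropped from sqlite.Open, callers open and initialize the index database in one pass and let InitDb decide between create and upgrade. The helper name openIndexDB and the database name "chainindex" below are assumptions for illustration only.

package example

import (
	"context"
	"database/sql"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/lib/sqlite"
)

// openIndexDB is a hypothetical helper showing the simplified Open signature.
func openIndexDB(ctx context.Context, path string, ddls []string) (*sql.DB, error) {
	db, err := sqlite.Open(path) // no longer returns an "exists" bool
	if err != nil {
		return nil, xerrors.Errorf("opening index db: %w", err)
	}
	// InitDb checks whether the schema needs to be created or upgraded,
	// so callers no longer need to branch on whether the file already existed.
	if err := sqlite.InitDb(ctx, "chainindex", db, ddls, nil); err != nil {
		_ = db.Close()
		return nil, xerrors.Errorf("initializing index db: %w", err)
	}
	return db, nil
}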
diff --git a/lib/sqlite/sqlite_test.go b/lib/sqlite/sqlite_test.go index bda6432f5e6..f492b092a5e 100644 --- a/lib/sqlite/sqlite_test.go +++ b/lib/sqlite/sqlite_test.go @@ -32,9 +32,8 @@ func TestSqlite(t *testing.T) { tmpDir := t.TempDir() dbPath := filepath.Join(tmpDir, "/test.db") - db, exists, err := sqlite.Open(dbPath) + db, err := sqlite.Open(dbPath) req.NoError(err) - req.False(exists) req.NotNil(db) err = sqlite.InitDb(context.Background(), "testdb", db, ddl, nil) @@ -95,9 +94,8 @@ func TestSqlite(t *testing.T) { // open again, check contents is the same - db, exists, err = sqlite.Open(dbPath) + db, err = sqlite.Open(dbPath) req.NoError(err) - req.True(exists) req.NotNil(db) err = sqlite.InitDb(context.Background(), "testdb", db, ddl, nil) @@ -113,9 +111,9 @@ func TestSqlite(t *testing.T) { // open again, with a migration - db, exists, err = sqlite.Open(dbPath) + db, err = sqlite.Open(dbPath) req.NoError(err) - req.True(exists) + req.NotNil(db) req.NotNil(db) migration1 := func(ctx context.Context, tx *sql.Tx) error { @@ -156,9 +154,8 @@ func TestSqlite(t *testing.T) { // open again, with another migration - db, exists, err = sqlite.Open(dbPath) + db, err = sqlite.Open(dbPath) req.NoError(err) - req.True(exists) req.NotNil(db) migration2 := func(ctx context.Context, tx *sql.Tx) error { From c2e5f6878b82128567ffcbb7eb767f83798c9589 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 13 Sep 2024 19:17:51 +0400 Subject: [PATCH 46/66] changes as per review --- chain/index/ddls.go | 69 +++++------ chain/index/events.go | 8 +- chain/index/gc.go | 4 +- chain/index/indexer.go | 248 +++++++++++++-------------------------- chain/index/read.go | 6 +- chain/index/reconcile.go | 10 +- 6 files changed, 126 insertions(+), 219 deletions(-) diff --git a/chain/index/ddls.go b/chain/index/ddls.go index 14505b03904..c7002fde6b5 100644 --- a/chain/index/ddls.go +++ b/chain/index/ddls.go @@ -1,46 +1,8 @@ package index -const DefaultDbFilename = "chainindex.db" - -const ( - stmtGetNonRevertedMessageInfo = "SELECT tipset_key_cid, height FROM tipset_message WHERE message_cid = ? AND reverted = 0" - stmtGetMsgCidFromEthHash = "SELECT message_cid FROM eth_tx_hash WHERE tx_hash = ?" - stmtInsertEthTxHash = "INSERT INTO eth_tx_hash (tx_hash, message_cid) VALUES (?, ?) ON CONFLICT (tx_hash) DO UPDATE SET inserted_at = CURRENT_TIMESTAMP" - - stmtInsertTipsetMessage = "INSERT INTO tipset_message (tipset_key_cid, height, reverted, message_cid, message_index) VALUES (?, ?, ?, ?, ?) ON CONFLICT (tipset_key_cid, message_cid) DO UPDATE SET reverted = 0" - - stmtHasTipset = "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ?)" - stmtUpdateTipsetToNonReverted = "UPDATE tipset_message SET reverted = 0 WHERE tipset_key_cid = ?" - - stmtUpdateTipsetToReverted = "UPDATE tipset_message SET reverted = 1 WHERE tipset_key_cid = ?" - - stmtRemoveTipsetsBeforeHeight = "DELETE FROM tipset_message WHERE height < ?" - - stmtRemoveEthHashesOlderThan = `DELETE FROM eth_tx_hash WHERE inserted_at < datetime('now', ?);` - - stmtUpdateTipsetsToRevertedFromHeight = "UPDATE tipset_message SET reverted = 1 WHERE height >= ?" 
- - stmtUpdateEventsToRevertedFromHeight = "UPDATE event SET reverted = 1 WHERE message_id IN (SELECT message_id FROM tipset_message WHERE height >= ?)" +import "database/sql" - stmtIsTipsetMessageNonEmpty = "SELECT EXISTS(SELECT 1 FROM tipset_message LIMIT 1)" - - stmtGetMinNonRevertedHeight = `SELECT MIN(height) FROM tipset_message WHERE reverted = 0` - - stmtHasNonRevertedTipset = `SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ? AND reverted = 0)` - - stmtUpdateEventsToReverted = `UPDATE event SET reverted = 1 WHERE message_id IN ( - SELECT message_id FROM tipset_message WHERE tipset_key_cid = ? - )` - - stmtUpdateEventsToNonReverted = `UPDATE event SET reverted = 0 WHERE message_id IN ( - SELECT message_id FROM tipset_message WHERE tipset_key_cid = ? - )` - - stmtGetMsgIdForMsgCidAndTipset = `SELECT message_id FROM tipset_message WHERE tipset_key_cid = ? AND message_cid = ?AND reverted = 0` - - stmtInsertEvent = "INSERT INTO event (message_id, event_index, emitter_addr, reverted) VALUES (?, ?, ?, ?)" - stmtInsertEventEntry = "INSERT INTO event_entry (event_id, indexed, flags, key, codec, value) VALUES (?, ?, ?, ?, ?, ?)" -) +const DefaultDbFilename = "chainindex.db" var ddls = []string{ `CREATE TABLE IF NOT EXISTS tipset_message ( @@ -91,3 +53,30 @@ var ddls = []string{ `CREATE INDEX IF NOT EXISTS event_entry_event_id ON event_entry(event_id)`, } + +// preparedStatementMapping returns a map of fields of the preparedStatements struct to the SQL +// query that should be prepared for that field. This is used to prepare all the statements in +// the preparedStatements struct. +func preparedStatementMapping(ps *preparedStatements) map[**sql.Stmt]string { + return map[**sql.Stmt]string{ + &ps.getNonRevertedMsgInfoStmt: "SELECT tipset_key_cid, height FROM tipset_message WHERE message_cid = ? AND reverted = 0", + &ps.getMsgCidFromEthHashStmt: "SELECT message_cid FROM eth_tx_hash WHERE tx_hash = ?", + &ps.insertEthTxHashStmt: "INSERT INTO eth_tx_hash (tx_hash, message_cid) VALUES (?, ?) ON CONFLICT (tx_hash) DO UPDATE SET inserted_at = CURRENT_TIMESTAMP", + &ps.insertTipsetMessageStmt: "INSERT INTO tipset_message (tipset_key_cid, height, reverted, message_cid, message_index) VALUES (?, ?, ?, ?, ?) ON CONFLICT (tipset_key_cid, message_cid) DO UPDATE SET reverted = 0", + &ps.hasTipsetStmt: "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ?)", + &ps.updateTipsetToNonRevertedStmt: "UPDATE tipset_message SET reverted = 0 WHERE tipset_key_cid = ?", + &ps.updateTipsetToRevertedStmt: "UPDATE tipset_message SET reverted = 1 WHERE tipset_key_cid = ?", + &ps.removeTipsetsBeforeHeightStmt: "DELETE FROM tipset_message WHERE height < ?", + &ps.removeEthHashesOlderThanStmt: "DELETE FROM eth_tx_hash WHERE inserted_at < datetime('now', ?)", + &ps.updateTipsetsToRevertedFromHeightStmt: "UPDATE tipset_message SET reverted = 1 WHERE height >= ?", + &ps.updateEventsToRevertedFromHeightStmt: "UPDATE event SET reverted = 1 WHERE message_id IN (SELECT message_id FROM tipset_message WHERE height >= ?)", + &ps.isTipsetMessageNonEmptyStmt: "SELECT EXISTS(SELECT 1 FROM tipset_message LIMIT 1)", + &ps.getMinNonRevertedHeightStmt: "SELECT MIN(height) FROM tipset_message WHERE reverted = 0", + &ps.hasNonRevertedTipsetStmt: "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ? 
AND reverted = 0)", + &ps.updateEventsToRevertedStmt: "UPDATE event SET reverted = 1 WHERE message_id IN (SELECT message_id FROM tipset_message WHERE tipset_key_cid = ?)", + &ps.updateEventsToNonRevertedStmt: "UPDATE event SET reverted = 0 WHERE message_id IN (SELECT message_id FROM tipset_message WHERE tipset_key_cid = ?)", + &ps.getMsgIdForMsgCidAndTipsetStmt: "SELECT message_id FROM tipset_message WHERE tipset_key_cid = ? AND message_cid = ? AND reverted = 0", + &ps.insertEventStmt: "INSERT INTO event (message_id, event_index, emitter_addr, reverted) VALUES (?, ?, ?, ?)", + &ps.insertEventEntryStmt: "INSERT INTO event_entry (event_id, indexed, flags, key, codec, value) VALUES (?, ?, ?, ?, ?, ?)", + } +} diff --git a/chain/index/events.go b/chain/index/events.go index f842248543f..6000ef348a8 100644 --- a/chain/index/events.go +++ b/chain/index/events.go @@ -42,7 +42,7 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ } // if we've already indexed events for this tipset, mark them as unreverted and return - res, err := tx.Stmt(si.updateEventsToNonRevertedStmt).ExecContext(ctx, msgTsKeyCidBytes) + res, err := tx.Stmt(si.stmts.updateEventsToNonRevertedStmt).ExecContext(ctx, msgTsKeyCidBytes) if err != nil { return xerrors.Errorf("failed to unrevert events for tipset: %w", err) } @@ -71,7 +71,7 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ // read message id for this message cid and tipset key cid var messageID int64 - if err := tx.Stmt(si.getMsgIdForMsgCidAndTipsetStmt).QueryRow(msgTsKeyCidBytes, msgCidBytes).Scan(&messageID); err != nil { + if err := tx.Stmt(si.stmts.getMsgIdForMsgCidAndTipsetStmt).QueryRow(msgTsKeyCidBytes, msgCidBytes).Scan(&messageID); err != nil { return xerrors.Errorf("failed to get message id for message cid and tipset key cid: %w", err) } if messageID == 0 { @@ -92,7 +92,7 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ } // Insert event into events table - eventResult, err := tx.Stmt(si.insertEventStmt).Exec(messageID, eventCount, addr.Bytes(), 0) + eventResult, err := tx.Stmt(si.stmts.insertEventStmt).Exec(messageID, eventCount, addr.Bytes(), 0) if err != nil { return xerrors.Errorf("failed to insert event: %w", err) } @@ -105,7 +105,7 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ // Insert event entries for _, entry := range event.Entries { - _, err := tx.Stmt(si.insertEventEntryStmt).Exec( + _, err := tx.Stmt(si.stmts.insertEventEntryStmt).Exec( eventID, isIndexedValue(entry.Flags), []byte{entry.Flags}, diff --git a/chain/index/gc.go b/chain/index/gc.go index 648501a2b90..168b59507e9 100644 --- a/chain/index/gc.go +++ b/chain/index/gc.go @@ -58,7 +58,7 @@ func (si *SqliteIndexer) gc(ctx context.Context) { log.Infof("gc'ing all (reverted and non-reverted) tipsets before epoch %d", removalEpoch) - res, err := si.removeTipsetsBeforeHeightStmt.ExecContext(ctx, removalEpoch) + res, err := si.stmts.removeTipsetsBeforeHeightStmt.ExecContext(ctx, removalEpoch) if err != nil { log.Errorw("failed to remove reverted tipsets before height", "height", removalEpoch, "error", err) return @@ -83,7 +83,7 @@ func (si *SqliteIndexer) gc(ctx context.Context) { } log.Infof("gc'ing eth hashes older than %d days", gcRetentionDays) - res, err = si.removeEthHashesOlderThanStmt.Exec("-" + strconv.Itoa(int(gcRetentionDays)) + " day") + res, err = si.stmts.removeEthHashesOlderThanStmt.Exec("-" + strconv.Itoa(int(gcRetentionDays)) + " day") if 
err != nil { log.Errorf("failed to gc eth hashes older than %d days: %w", gcRetentionDays, err) return diff --git a/chain/index/indexer.go b/chain/index/indexer.go index 634c7ac2c8d..9e4f94d2281 100644 --- a/chain/index/indexer.go +++ b/chain/index/indexer.go @@ -22,16 +22,7 @@ var _ Indexer = (*SqliteIndexer)(nil) // IdToRobustAddrFunc is a function type that resolves an actor ID to a robust address type IdToRobustAddrFunc func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) -type SqliteIndexer struct { - ctx context.Context - cancel context.CancelFunc - wg sync.WaitGroup - - db *sql.DB - cs ChainStore - - idToRobustAddrFunc IdToRobustAddrFunc - +type preparedStatements struct { insertEthTxHashStmt *sql.Stmt getNonRevertedMsgInfoStmt *sql.Stmt getMsgCidFromEthHashStmt *sql.Stmt @@ -51,6 +42,19 @@ type SqliteIndexer struct { getMsgIdForMsgCidAndTipsetStmt *sql.Stmt insertEventStmt *sql.Stmt insertEventEntryStmt *sql.Stmt +} + +type SqliteIndexer struct { + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + db *sql.DB + cs ChainStore + + idToRobustAddrFunc IdToRobustAddrFunc + + stmts *preparedStatements gcRetentionEpochs int64 reconcileEmptyIndex bool @@ -95,8 +99,10 @@ func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64, recon gcRetentionEpochs: gcRetentionEpochs, reconcileEmptyIndex: reconcileEmptyIndex, maxReconcileTipsets: maxReconcileTipsets, + stmts: &preparedStatements{}, } - if err = si.prepareStatements(); err != nil { + + if err = si.initStatements(); err != nil { return nil, xerrors.Errorf("failed to prepare statements: %w", err) } @@ -133,102 +139,14 @@ func (si *SqliteIndexer) Close() error { return nil } -func (si *SqliteIndexer) prepareStatements() error { - var err error - - si.insertEthTxHashStmt, err = si.db.Prepare(stmtInsertEthTxHash) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "insertEthTxHashStmt", err) - } - - si.getNonRevertedMsgInfoStmt, err = si.db.Prepare(stmtGetNonRevertedMessageInfo) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "getNonRevertedMsgInfoStmt", err) - } - - si.getMsgCidFromEthHashStmt, err = si.db.Prepare(stmtGetMsgCidFromEthHash) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "getMsgCidFromEthHashStmt", err) - } - - si.insertTipsetMessageStmt, err = si.db.Prepare(stmtInsertTipsetMessage) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "insertTipsetMessageStmt", err) - } - - si.hasTipsetStmt, err = si.db.Prepare(stmtHasTipset) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "hasTipsetStmt", err) - } - - si.updateTipsetToNonRevertedStmt, err = si.db.Prepare(stmtUpdateTipsetToNonReverted) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "updateTipsetToNonRevertedStmt", err) - } - - si.updateTipsetToRevertedStmt, err = si.db.Prepare(stmtUpdateTipsetToReverted) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "updateTipsetToRevertedStmt", err) - } - - si.removeTipsetsBeforeHeightStmt, err = si.db.Prepare(stmtRemoveTipsetsBeforeHeight) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "removeTipsetsBeforeHeightStmt", err) - } - - si.removeEthHashesOlderThanStmt, err = si.db.Prepare(stmtRemoveEthHashesOlderThan) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "removeEthHashesOlderThanStmt", err) - } - - si.updateTipsetsToRevertedFromHeightStmt, err = si.db.Prepare(stmtUpdateTipsetsToRevertedFromHeight) - if err != nil { - return xerrors.Errorf("prepare %s: %w", 
"updateTipsetsToRevertedFromHeightStmt", err) - } - - si.updateEventsToRevertedFromHeightStmt, err = si.db.Prepare(stmtUpdateEventsToRevertedFromHeight) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "updateEventsToRevertedFromHeightStmt", err) - } - - si.isTipsetMessageNonEmptyStmt, err = si.db.Prepare(stmtIsTipsetMessageNonEmpty) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "isTipsetMessageNonEmptyStmt", err) - } - - si.getMinNonRevertedHeightStmt, err = si.db.Prepare(stmtGetMinNonRevertedHeight) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "getMinNonRevertedHeightStmt", err) - } - - si.hasNonRevertedTipsetStmt, err = si.db.Prepare(stmtHasNonRevertedTipset) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "hasNonRevertedTipsetStmt", err) - } - - si.updateEventsToNonRevertedStmt, err = si.db.Prepare(stmtUpdateEventsToNonReverted) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "updateEventsToNonRevertedStmt", err) - } - - si.updateEventsToRevertedStmt, err = si.db.Prepare(stmtUpdateEventsToReverted) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "updateEventsToRevertedStmt", err) - } - - si.getMsgIdForMsgCidAndTipsetStmt, err = si.db.Prepare(stmtGetMsgIdForMsgCidAndTipset) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "getMsgIdForMsgCidAndTipsetStmt", err) - } - - si.insertEventStmt, err = si.db.Prepare(stmtInsertEvent) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "insertEventStmt", err) - } - - si.insertEventEntryStmt, err = si.db.Prepare(stmtInsertEventEntry) - if err != nil { - return xerrors.Errorf("prepare %s: %w", "insertEventEntryStmt", err) +func (si *SqliteIndexer) initStatements() error { + stmtMapping := preparedStatementMapping(si.stmts) + for stmtPointer, query := range stmtMapping { + var err error + *stmtPointer, err = si.db.Prepare(query) + if err != nil { + return xerrors.Errorf("prepare statement [%s]: %w", query, err) + } } return nil @@ -247,6 +165,16 @@ func (si *SqliteIndexer) IndexEthTxHash(ctx context.Context, txHash ethtypes.Eth }) } +func (si *SqliteIndexer) indexEthTxHash(ctx context.Context, tx *sql.Tx, txHash ethtypes.EthHash, msgCid cid.Cid) error { + insertEthTxHashStmt := tx.Stmt(si.stmts.insertEthTxHashStmt) + _, err := insertEthTxHashStmt.ExecContext(ctx, txHash.String(), msgCid.Bytes()) + if err != nil { + return xerrors.Errorf("failed to index eth tx hash: %w", err) + } + + return nil +} + func (si *SqliteIndexer) IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error { if msg.Signature.Type != crypto.SigTypeDelegated { return nil @@ -277,16 +205,6 @@ func (si *SqliteIndexer) indexSignedMessage(ctx context.Context, tx *sql.Tx, msg return si.indexEthTxHash(ctx, tx, txHash, msg.Cid()) } -func (si *SqliteIndexer) indexEthTxHash(ctx context.Context, tx *sql.Tx, txHash ethtypes.EthHash, msgCid cid.Cid) error { - insertEthTxHashStmt := tx.Stmt(si.insertEthTxHashStmt) - _, err := insertEthTxHashStmt.ExecContext(ctx, txHash.String(), msgCid.Bytes()) - if err != nil { - return xerrors.Errorf("failed to index eth tx hash: %w", err) - } - - return nil -} - func (si *SqliteIndexer) Apply(ctx context.Context, from, to *types.TipSet) error { si.closeLk.RLock() if si.closed { @@ -314,51 +232,6 @@ func (si *SqliteIndexer) Apply(ctx context.Context, from, to *types.TipSet) erro return nil } -func (si *SqliteIndexer) Revert(ctx context.Context, from, to *types.TipSet) error { - si.closeLk.RLock() - if si.closed { - si.closeLk.RUnlock() - return 
ErrClosed - } - si.closeLk.RUnlock() - - // We're reverting the chain from the tipset at `from` to the tipset at `to`. - // Height(to) < Height(from) - - revertTsKeyCid, err := toTipsetKeyCidBytes(from) - if err != nil { - return xerrors.Errorf("error getting tipset key cid: %w", err) - } - - // Because of deferred execution in Filecoin, events at tipset T are reverted when a tipset T+1 is reverted. - // However, the tipet `T` itself is not reverted. - eventTsKeyCid, err := toTipsetKeyCidBytes(to) - if err != nil { - return xerrors.Errorf("error getting tipset key cid: %w", err) - } - - err = withTx(ctx, si.db, func(tx *sql.Tx) error { - if _, err := tx.Stmt(si.updateTipsetToRevertedStmt).ExecContext(ctx, revertTsKeyCid); err != nil { - return xerrors.Errorf("error marking tipset %s as reverted: %w", revertTsKeyCid, err) - } - - // events are indexed against the message inclusion tipset, not the message execution tipset. - // So we need to revert the events for the message inclusion tipset. - if _, err := tx.Stmt(si.updateEventsToRevertedStmt).ExecContext(ctx, eventTsKeyCid); err != nil { - return xerrors.Errorf("error reverting events for tipset %s: %w", eventTsKeyCid, err) - } - - return nil - }) - if err != nil { - return xerrors.Errorf("error during revert transaction: %w", err) - } - - si.notifyUpdateSubs() - - return nil -} - func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error { tsKeyCidBytes, err := toTipsetKeyCidBytes(ts) if err != nil { @@ -372,7 +245,7 @@ func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types. } height := ts.Height() - insertTipsetMsgStmt := tx.Stmt(si.insertTipsetMessageStmt) + insertTipsetMsgStmt := tx.Stmt(si.stmts.insertTipsetMessageStmt) msgs, err := si.cs.MessagesForTipset(ctx, ts) if err != nil { @@ -437,14 +310,59 @@ func (si *SqliteIndexer) indexTipsetWithParentEvents(ctx context.Context, tx *sq func (si *SqliteIndexer) restoreTipsetIfExists(ctx context.Context, tx *sql.Tx, tsKeyCidBytes []byte) (bool, error) { // Check if the tipset already exists var exists bool - if err := tx.Stmt(si.hasTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { + if err := tx.Stmt(si.stmts.hasTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { return false, xerrors.Errorf("failed to check if tipset exists: %w", err) } if exists { - if _, err := tx.Stmt(si.updateTipsetToNonRevertedStmt).ExecContext(ctx, tsKeyCidBytes); err != nil { + if _, err := tx.Stmt(si.stmts.updateTipsetToNonRevertedStmt).ExecContext(ctx, tsKeyCidBytes); err != nil { return false, xerrors.Errorf("failed to restore tipset: %w", err) } return true, nil } return false, nil } + +func (si *SqliteIndexer) Revert(ctx context.Context, from, to *types.TipSet) error { + si.closeLk.RLock() + if si.closed { + si.closeLk.RUnlock() + return ErrClosed + } + si.closeLk.RUnlock() + + // We're reverting the chain from the tipset at `from` to the tipset at `to`. + // Height(to) < Height(from) + + revertTsKeyCid, err := toTipsetKeyCidBytes(from) + if err != nil { + return xerrors.Errorf("error getting tipset key cid: %w", err) + } + + // Because of deferred execution in Filecoin, events at tipset T are reverted when a tipset T+1 is reverted. + // However, the tipet `T` itself is not reverted. 
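+	// For example: when the tipset at height H+1 (`from`) is reverted, the events
+	// recorded against messages included in the height-H tipset (`to`) are marked
+	// reverted below, while the height-H tipset_message rows themselves remain
+	// non-reverted because that tipset is still part of the canonical chain.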
+ eventTsKeyCid, err := toTipsetKeyCidBytes(to) + if err != nil { + return xerrors.Errorf("error getting tipset key cid: %w", err) + } + + err = withTx(ctx, si.db, func(tx *sql.Tx) error { + if _, err := tx.Stmt(si.stmts.updateTipsetToRevertedStmt).ExecContext(ctx, revertTsKeyCid); err != nil { + return xerrors.Errorf("error marking tipset %s as reverted: %w", revertTsKeyCid, err) + } + + // events are indexed against the message inclusion tipset, not the message execution tipset. + // So we need to revert the events for the message inclusion tipset. + if _, err := tx.Stmt(si.stmts.updateEventsToRevertedStmt).ExecContext(ctx, eventTsKeyCid); err != nil { + return xerrors.Errorf("error reverting events for tipset %s: %w", eventTsKeyCid, err) + } + + return nil + }) + if err != nil { + return xerrors.Errorf("error during revert transaction: %w", err) + } + + si.notifyUpdateSubs() + + return nil +} diff --git a/chain/index/read.go b/chain/index/read.go index 5019433fcad..3d03e7957ce 100644 --- a/chain/index/read.go +++ b/chain/index/read.go @@ -40,7 +40,7 @@ func (si *SqliteIndexer) GetCidFromHash(ctx context.Context, txHash ethtypes.Eth } func (si *SqliteIndexer) queryMsgCidFromEthHash(ctx context.Context, txHash ethtypes.EthHash, msgCidBytes *[]byte) error { - return si.getMsgCidFromEthHashStmt.QueryRowContext(ctx, txHash.String()).Scan(msgCidBytes) + return si.stmts.getMsgCidFromEthHashStmt.QueryRowContext(ctx, txHash.String()).Scan(msgCidBytes) } func (si *SqliteIndexer) GetMsgInfo(ctx context.Context, messageCid cid.Cid) (*MsgInfo, error) { @@ -97,7 +97,7 @@ func (si *SqliteIndexer) readWithHeadIndexWait(ctx context.Context, readFunc fun } func (si *SqliteIndexer) queryMsgInfo(ctx context.Context, messageCid cid.Cid, tipsetKeyCidBytes *[]byte, height *int64) error { - return si.getNonRevertedMsgInfoStmt.QueryRowContext(ctx, messageCid.Bytes()).Scan(tipsetKeyCidBytes, height) + return si.stmts.getNonRevertedMsgInfoStmt.QueryRowContext(ctx, messageCid.Bytes()).Scan(tipsetKeyCidBytes, height) } func (si *SqliteIndexer) waitTillHeadIndexed(ctx context.Context) error { @@ -134,7 +134,7 @@ func (si *SqliteIndexer) waitTillHeadIndexed(ctx context.Context) error { func (si *SqliteIndexer) isTipsetIndexed(ctx context.Context, tsKeyCidBytes []byte) (bool, error) { var exists bool - if err := si.hasTipsetStmt.QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { + if err := si.stmts.hasTipsetStmt.QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { return false, xerrors.Errorf("failed to check if tipset is indexed: %w", err) } return exists, nil diff --git a/chain/index/reconcile.go b/chain/index/reconcile.go index 1223cc053bb..4af4e2a7614 100644 --- a/chain/index/reconcile.go +++ b/chain/index/reconcile.go @@ -38,7 +38,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip return withTx(ctx, si.db, func(tx *sql.Tx) error { var hasTipset bool - err := tx.StmtContext(ctx, si.isTipsetMessageNonEmptyStmt).QueryRowContext(ctx).Scan(&hasTipset) + err := tx.StmtContext(ctx, si.stmts.isTipsetMessageNonEmptyStmt).QueryRowContext(ctx).Scan(&hasTipset) if err != nil { return xerrors.Errorf("failed to check if tipset message is empty: %w", err) } @@ -56,7 +56,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip // Find the minimum applied tipset in the index; this will mark the absolute min height of the reconciliation walk var reconciliationEpoch abi.ChainEpoch - row := tx.StmtContext(ctx, 
si.getMinNonRevertedHeightStmt).QueryRowContext(ctx) + row := tx.StmtContext(ctx, si.stmts.getMinNonRevertedHeightStmt).QueryRowContext(ctx) if err := row.Scan(&reconciliationEpoch); err != nil { return xerrors.Errorf("failed to scan minimum non-reverted height: %w", err) } @@ -80,7 +80,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip } var exists bool - err = tx.StmtContext(ctx, si.hasNonRevertedTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists) + err = tx.StmtContext(ctx, si.stmts.hasNonRevertedTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists) if err != nil { return xerrors.Errorf("failed to check if tipset exists and is not reverted: %w", err) } @@ -116,7 +116,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip // mark all tipsets from the reconciliation epoch onwards in the Index as reverted as they are not in the current canonical chain log.Infof("Marking tipsets as reverted from height %d", reconciliationEpoch) - result, err := tx.StmtContext(ctx, si.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch)) + result, err := tx.StmtContext(ctx, si.stmts.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch)) if err != nil { return xerrors.Errorf("failed to mark tipsets as reverted: %w", err) } @@ -126,7 +126,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip } // also need to mark events as reverted for the corresponding inclusion tipsets - if _, err = tx.StmtContext(ctx, si.updateEventsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch-1)); err != nil { + if _, err = tx.StmtContext(ctx, si.stmts.updateEventsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch-1)); err != nil { return xerrors.Errorf("failed to mark events as reverted: %w", err) } From 286af22213ec9275d40001ed0978bae6263fd5d4 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 13 Sep 2024 20:14:13 +0400 Subject: [PATCH 47/66] handle index with reverted tipsets during reconciliation --- chain/index/reconcile.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/chain/index/reconcile.go b/chain/index/reconcile.go index 4af4e2a7614..247a52b9470 100644 --- a/chain/index/reconcile.go +++ b/chain/index/reconcile.go @@ -7,7 +7,6 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/types" ) @@ -55,13 +54,25 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip } // Find the minimum applied tipset in the index; this will mark the absolute min height of the reconciliation walk + var reconciliationEpochInIndex sql.NullInt64 var reconciliationEpoch abi.ChainEpoch + row := tx.StmtContext(ctx, si.stmts.getMinNonRevertedHeightStmt).QueryRowContext(ctx) - if err := row.Scan(&reconciliationEpoch); err != nil { - return xerrors.Errorf("failed to scan minimum non-reverted height: %w", err) + if err := row.Scan(&reconciliationEpochInIndex); err != nil { + if err != sql.ErrNoRows { + return xerrors.Errorf("failed to scan minimum non-reverted height: %w", err) + } + log.Warn("index only contains reverted tipsets; setting reconciliation epoch to 0") + reconciliationEpoch = 0 + } else if !reconciliationEpochInIndex.Valid { + log.Warn("index only contains reverted tipsets; setting reconciliation epoch to 0") + reconciliationEpoch = 0 + } else { + reconciliationEpoch = 
abi.ChainEpoch(reconciliationEpochInIndex.Int64) } currTs := head + log.Infof("Starting chain reconciliation from head height %d; searching for base reconciliation height above %d)", head.Height(), reconciliationEpoch) var missingTipsets []*types.TipSet From d67a30ad87ea894cf7411e568fbd837f590bda39 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 13 Sep 2024 20:30:20 +0400 Subject: [PATCH 48/66] changes as per review --- chain/index/reconcile.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/chain/index/reconcile.go b/chain/index/reconcile.go index 247a52b9470..59479dec770 100644 --- a/chain/index/reconcile.go +++ b/chain/index/reconcile.go @@ -8,6 +8,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/types" + ipld "github.com/ipfs/go-ipld-format" ) // ReconcileWithChain ensures that the index is consistent with the current chain state. @@ -203,15 +204,20 @@ func (si *SqliteIndexer) applyMissingTipsets(ctx context.Context, tx *sql.Tx, mi } if err := si.indexTipsetWithParentEvents(ctx, tx, parentTs, currTs); err != nil { - log.Warnf("failed to index tipset with parent events during reconciliation: %s", err) + if !ipld.IsNotFound(err) { + return xerrors.Errorf("failed to index tipset with parent events during reconciliation: %w", err) + } // the above could have failed because of missing messages for `parentTs` in the chainstore // so try to index only the currentTs and then halt the reconciliation process as we've // reached the end of what we have in the chainstore if err := si.indexTipset(ctx, tx, currTs); err != nil { - log.Warnf("failed to index tipset during reconciliation: %s", err) - } else { - totalIndexed++ + if !ipld.IsNotFound(err) { + return xerrors.Errorf("failed to index tipset during reconciliation: %w", err) + } + log.Infof("stopping reconciliation at height %d as chainstore only contains data up to this height; error is %s", currTs.Height(), err) + break } + totalIndexed++ break } From 5f5ef3a884c8547ca93ac86015a396256d03c6ab Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Fri, 13 Sep 2024 21:14:38 +0400 Subject: [PATCH 49/66] fix type of max reconcile epoch --- chain/index/indexer.go | 4 ++-- chain/index/reconcile.go | 7 ++++--- documentation/en/default-lotus-config.toml | 2 +- node/config/doc_gen.go | 2 +- node/config/types.go | 2 +- 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/chain/index/indexer.go b/chain/index/indexer.go index 9e4f94d2281..06766d2aa2a 100644 --- a/chain/index/indexer.go +++ b/chain/index/indexer.go @@ -58,7 +58,7 @@ type SqliteIndexer struct { gcRetentionEpochs int64 reconcileEmptyIndex bool - maxReconcileTipsets int + maxReconcileTipsets uint64 mu sync.Mutex updateSubs map[uint64]*updateSub @@ -69,7 +69,7 @@ type SqliteIndexer struct { } func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64, reconcileEmptyIndex bool, - maxReconcileTipsets int) (si *SqliteIndexer, err error) { + maxReconcileTipsets uint64) (si *SqliteIndexer, err error) { db, err := sqlite.Open(path) if err != nil { return nil, xerrors.Errorf("failed to setup message index db: %w", err) diff --git a/chain/index/reconcile.go b/chain/index/reconcile.go index 59479dec770..b06ba327d1b 100644 --- a/chain/index/reconcile.go +++ b/chain/index/reconcile.go @@ -4,11 +4,12 @@ import ( "context" "database/sql" + ipld "github.com/ipfs/go-ipld-format" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/lotus/chain/types" - ipld "github.com/ipfs/go-ipld-format" ) // ReconcileWithChain ensures that the index is consistent with the current chain state. @@ -104,7 +105,7 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip break } - if len(missingTipsets) < si.maxReconcileTipsets { + if uint64(len(missingTipsets)) < si.maxReconcileTipsets { missingTipsets = append(missingTipsets, currTs) } // even if len(missingTipsets) >= si.maxReconcileTipsets, we still need to continue the walk @@ -155,7 +156,7 @@ func (si *SqliteIndexer) backfillEmptyIndex(ctx context.Context, tx *sql.Tx, hea log.Infof("Backfilling empty chain index from head height %d", head.Height()) var err error - for currTs != nil && len(missingTipsets) < si.maxReconcileTipsets { + for currTs != nil && uint64(len(missingTipsets)) < si.maxReconcileTipsets { missingTipsets = append(missingTipsets, currTs) if currTs.Height() == 0 { break diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index de24e397216..b8f744b9c1d 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -345,7 +345,7 @@ # Note: Setting this value too low may result in incomplete indexing, while setting it too high # may increase startup time. # - # type: int + # type: uint64 # env var: LOTUS_CHAININDEXER_MAXRECONCILETIPSETS #MaxReconcileTipsets = 8640 diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 53ea866a26d..914f3e72274 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -117,7 +117,7 @@ controlled using the MaxReconcileTipsets option.`, }, { Name: "MaxReconcileTipsets", - Type: "int", + Type: "uint64", Comment: `MaxReconcileTipsets limits the number of tipsets to reconcile with the chain during startup. It represents the maximum number of tipsets to index from the chain state that are absent in the index. diff --git a/node/config/types.go b/node/config/types.go index 74c4c3972e4..126cd7820c3 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -655,7 +655,7 @@ type ChainIndexerConfig struct { // // Note: Setting this value too low may result in incomplete indexing, while setting it too high // may increase startup time. 
- MaxReconcileTipsets int + MaxReconcileTipsets uint64 } type HarmonyDB struct { From f5a5c617f16b8318b9c8417f18a134a4e062a5ee Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Sat, 14 Sep 2024 14:43:04 +0400 Subject: [PATCH 50/66] changes to reconciliation as per review --- chain/index/reconcile.go | 181 ++++++++++++++++++++++--------------- node/modules/chainindex.go | 2 +- 2 files changed, 109 insertions(+), 74 deletions(-) diff --git a/chain/index/reconcile.go b/chain/index/reconcile.go index b06ba327d1b..70156c63000 100644 --- a/chain/index/reconcile.go +++ b/chain/index/reconcile.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" ) @@ -46,13 +47,13 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip isIndexEmpty := !hasTipset if isIndexEmpty && !si.reconcileEmptyIndex { - log.Info("Chain index is empty and reconcileEmptyIndex is disabled; skipping reconciliation") + log.Info("chain index is empty and reconcileEmptyIndex is disabled; skipping reconciliation") return nil } if isIndexEmpty { - log.Info("Chain index is empty; backfilling from head") - return si.backfillEmptyIndex(ctx, tx, head) + log.Info("chain index is empty; backfilling from head") + return si.backfillIndex(ctx, tx, head, 0) } // Find the minimum applied tipset in the index; this will mark the absolute min height of the reconciliation walk @@ -75,13 +76,12 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip currTs := head - log.Infof("Starting chain reconciliation from head height %d; searching for base reconciliation height above %d)", head.Height(), reconciliationEpoch) - var missingTipsets []*types.TipSet + log.Infof("starting chain reconciliation from head height %d; reconciliation epoch is %d", head.Height(), reconciliationEpoch) // The goal here is to walk the canonical chain backwards from head until we find a matching non-reverted tipset // in the db so we know where to start reconciliation from // All tipsets that exist in the DB but not in the canonical chain are then marked as reverted - // All tpsets that exist in the canonical chain but not in the db are then applied + // All tipsets that exist in the canonical chain but not in the db are then applied // we only need to walk back as far as the reconciliation epoch as all the tipsets in the index // below the reconciliation epoch are already marked as reverted because the reconciliation epoch @@ -101,16 +101,10 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip if exists { // found it! 
reconciliationEpoch = currTs.Height() + 1 - log.Infof("Found matching tipset at height %d, setting reconciliation epoch to %d", currTs.Height(), reconciliationEpoch) + log.Infof("found matching tipset at height %d, setting reconciliation epoch to %d", currTs.Height(), reconciliationEpoch) break } - if uint64(len(missingTipsets)) < si.maxReconcileTipsets { - missingTipsets = append(missingTipsets, currTs) - } - // even if len(missingTipsets) >= si.maxReconcileTipsets, we still need to continue the walk - // to find the final reconciliation epoch so we can mark the indexed tipsets not in the main chain as reverted - if currTs.Height() == 0 { log.Infof("ReconcileWithChain reached genesis but no matching tipset found in index") break @@ -143,89 +137,130 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip return xerrors.Errorf("failed to mark events as reverted: %w", err) } - log.Infof("Marked %d tipsets as reverted from height %d", rowsAffected, reconciliationEpoch) + log.Infof("marked %d tipsets as reverted from height %d", rowsAffected, reconciliationEpoch) + + // if the head is less than the reconciliation epoch, we don't need to index any tipsets as we're already caught up + if head.Height() < reconciliationEpoch { + log.Info("no missing tipsets to index; index is already caught up with chain") + return nil + } + + // apply all missing tipsets by walking the chain backwards starting from head upto the reconciliation epoch + log.Infof("indexing missing tipsets backwards from head height %d to reconciliation epoch %d", head.Height(), reconciliationEpoch) - return si.applyMissingTipsets(ctx, tx, missingTipsets) + // if head.Height == reconciliationEpoch, this will only index head and return + if err := si.backfillIndex(ctx, tx, head, reconciliationEpoch); err != nil { + return xerrors.Errorf("failed to backfill index: %w", err) + } + + return nil }) } -func (si *SqliteIndexer) backfillEmptyIndex(ctx context.Context, tx *sql.Tx, head *types.TipSet) error { +// backfillIndex backfills the chain index with missing tipsets starting from the given head tipset +// and stopping after the specified stopAfter epoch (inclusive). +// +// The behavior of this function depends on the relationship between head.Height and stopAfter: +// +// 1. If head.Height > stopAfter: +// - The function will apply missing tipsets from head.Height down to stopAfter (inclusive). +// - It will stop applying tipsets if the maximum number of tipsets to apply (si.maxReconcileTipsets) is reached. +// - If the chain store only contains data up to a certain height, the function will stop backfilling at that height. +// +// 2. If head.Height == stopAfter: +// - The function will only apply the head tipset and then return. +// +// 3. If head.Height < stopAfter: +// - The function will immediately return without applying any tipsets. +// +// The si.maxReconcileTipsets parameter is used to limit the maximum number of tipsets that can be applied during the backfill process. +// If the number of applied tipsets reaches si.maxReconcileTipsets, the function will stop backfilling and return. +// +// The function also logs progress information at regular intervals (every builtin.EpochsInDay) to provide visibility into the backfill process. 
+func (si *SqliteIndexer) backfillIndex(ctx context.Context, tx *sql.Tx, head *types.TipSet, stopAfter abi.ChainEpoch) error { + if head.Height() < stopAfter { + return nil + } + currTs := head - var missingTipsets []*types.TipSet + totalApplied := uint64(0) + lastLoggedEpoch := head.Height() - log.Infof("Backfilling empty chain index from head height %d", head.Height()) - var err error + log.Infof("backfilling chain index backwards starting from head height %d", head.Height()) + + // Calculate the actual number of tipsets to apply + totalTipsetsToApply := min(uint64(head.Height()-stopAfter+1), si.maxReconcileTipsets) + + for currTs != nil { + if totalApplied >= si.maxReconcileTipsets { + log.Infof("reached maximum number of tipsets to apply (%d), finishing backfill; backfill applied %d tipsets", + si.maxReconcileTipsets, totalApplied) + return nil + } + + err := si.applyMissingTipset(ctx, tx, currTs) + if err != nil { + if ipld.IsNotFound(err) { + log.Infof("stopping backfill at height %d as chain store only contains data up to this height; backfill applied %d tipsets", + currTs.Height(), totalApplied) + return nil + } + + return xerrors.Errorf("failed to apply tipset at height %d: %w", currTs.Height(), err) + } + + totalApplied++ + + if lastLoggedEpoch-currTs.Height() >= builtin.EpochsInDay { + progress := float64(totalApplied) / float64(totalTipsetsToApply) * 100 + log.Infof("backfill progress: %.2f%% complete (%d tipsets applied; total to apply: %d), ongoing", progress, totalApplied, totalTipsetsToApply) + lastLoggedEpoch = currTs.Height() + } - for currTs != nil && uint64(len(missingTipsets)) < si.maxReconcileTipsets { - missingTipsets = append(missingTipsets, currTs) if currTs.Height() == 0 { - break + log.Infof("reached genesis tipset and have backfilled everything up to genesis; backfilled %d tipsets", totalApplied) + return nil + } + + if currTs.Height() <= stopAfter { + log.Infof("reached stop height %d; backfilled %d tipsets", stopAfter, totalApplied) + return nil } currTs, err = si.cs.GetTipSetFromKey(ctx, currTs.Parents()) if err != nil { - return xerrors.Errorf("failed to walk chain: %w", err) + return xerrors.Errorf("failed to walk chain at height %d: %w", currTs.Height(), err) } } - return si.applyMissingTipsets(ctx, tx, missingTipsets) + log.Infof("applied %d tipsets during backfill", totalApplied) + return nil } -func (si *SqliteIndexer) applyMissingTipsets(ctx context.Context, tx *sql.Tx, missingTipsets []*types.TipSet) error { - if len(missingTipsets) == 0 { - log.Info("No missing tipsets to index; index is all caught up with the chain") - return nil +// applyMissingTipset indexes a single missing tipset and its parent events +// It's a simplified version of applyMissingTipsets, handling one tipset at a time +func (si *SqliteIndexer) applyMissingTipset(ctx context.Context, tx *sql.Tx, currTs *types.TipSet) error { + if currTs == nil { + return xerrors.Errorf("failed to apply missing tipset: tipset is nil") } - log.Infof("Applying %d missing tipsets to Index; max missing tipset height %d; min missing tipset height %d", len(missingTipsets), - missingTipsets[0].Height(), missingTipsets[len(missingTipsets)-1].Height()) - totalIndexed := 0 - - // apply all missing tipsets from the canonical chain to the current chain head - for i := 0; i < len(missingTipsets); i++ { - currTs := missingTipsets[i] - var parentTs *types.TipSet - var err error - - if i < len(missingTipsets)-1 { - // a caller must supply a reverse-ordered contiguous list of missingTipsets - parentTs = 
missingTipsets[i+1] - } else if currTs.Height() > 0 { - parentTs, err = si.cs.GetTipSetFromKey(ctx, currTs.Parents()) - if err != nil { - return xerrors.Errorf("failed to get parent tipset: %w", err) - } - } else if currTs.Height() == 0 { - if err := si.indexTipset(ctx, tx, currTs); err != nil { - log.Warnf("failed to index genesis tipset during reconciliation: %s", err) - } else { - totalIndexed++ - } - break - } - - if err := si.indexTipsetWithParentEvents(ctx, tx, parentTs, currTs); err != nil { - if !ipld.IsNotFound(err) { - return xerrors.Errorf("failed to index tipset with parent events during reconciliation: %w", err) - } - // the above could have failed because of missing messages for `parentTs` in the chainstore - // so try to index only the currentTs and then halt the reconciliation process as we've - // reached the end of what we have in the chainstore - if err := si.indexTipset(ctx, tx, currTs); err != nil { - if !ipld.IsNotFound(err) { - return xerrors.Errorf("failed to index tipset during reconciliation: %w", err) - } - log.Infof("stopping reconciliation at height %d as chainstore only contains data up to this height; error is %s", currTs.Height(), err) - break - } - totalIndexed++ - break + // Special handling for genesis tipset + if currTs.Height() == 0 { + if err := si.indexTipset(ctx, tx, currTs); err != nil { + return xerrors.Errorf("failed to index genesis tipset: %w", err) } + return nil + } - totalIndexed++ + parentTs, err := si.cs.GetTipSetFromKey(ctx, currTs.Parents()) + if err != nil { + return xerrors.Errorf("failed to get parent tipset: %w", err) } - log.Infof("Indexed %d missing tipsets during reconciliation", totalIndexed) + // Index the tipset along with its parent events + if err := si.indexTipsetWithParentEvents(ctx, tx, parentTs, currTs); err != nil { + return xerrors.Errorf("failed to index tipset with parent events: %w", err) + } return nil } diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index 59128c47132..3b8077c072b 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -92,7 +92,7 @@ func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer index.In unlockObserver() return xerrors.Errorf("error while reconciling chain index with chain state: %w", err) } - log.Infof("Chain indexer reconciled with chain state; observer will start upates from height: %d", head.Height()) + log.Infof("chain indexer reconciled with chain state; observer will start upates from height: %d", head.Height()) unlockObserver() if err := indexer.Start(); err != nil { From 730d00a303baab3e72a1303c4ed1a90a991b2b0a Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Sat, 14 Sep 2024 14:44:40 +0400 Subject: [PATCH 51/66] log ipld error --- chain/index/reconcile.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chain/index/reconcile.go b/chain/index/reconcile.go index 70156c63000..5d4aa2ee5ad 100644 --- a/chain/index/reconcile.go +++ b/chain/index/reconcile.go @@ -201,8 +201,8 @@ func (si *SqliteIndexer) backfillIndex(ctx context.Context, tx *sql.Tx, head *ty err := si.applyMissingTipset(ctx, tx, currTs) if err != nil { if ipld.IsNotFound(err) { - log.Infof("stopping backfill at height %d as chain store only contains data up to this height; backfill applied %d tipsets", - currTs.Height(), totalApplied) + log.Infof("stopping backfill at height %d as chain store only contains data up to this height as per error %s; backfill applied %d tipsets", + currTs.Height(), err, totalApplied) return nil } From 
c099abf13a908d0a9a913200e4d6b1615ba84bf3 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Sat, 14 Sep 2024 14:45:30 +0400 Subject: [PATCH 52/66] better logging of progress --- chain/index/reconcile.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/index/reconcile.go b/chain/index/reconcile.go index 5d4aa2ee5ad..828e918cb47 100644 --- a/chain/index/reconcile.go +++ b/chain/index/reconcile.go @@ -213,7 +213,7 @@ func (si *SqliteIndexer) backfillIndex(ctx context.Context, tx *sql.Tx, head *ty if lastLoggedEpoch-currTs.Height() >= builtin.EpochsInDay { progress := float64(totalApplied) / float64(totalTipsetsToApply) * 100 - log.Infof("backfill progress: %.2f%% complete (%d tipsets applied; total to apply: %d), ongoing", progress, totalApplied, totalTipsetsToApply) + log.Infof("backfill progress: %.2f%% complete (%d out of %d tipsets applied), ongoing", progress, totalApplied, totalTipsetsToApply) lastLoggedEpoch = currTs.Height() } From 951ce772485a6ab03041f818305e643a6fb734a6 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Sat, 14 Sep 2024 15:32:10 +0400 Subject: [PATCH 53/66] disable chain indexer hydrate from snapshot based on config --- cmd/lotus/daemon.go | 20 +++++++++++++++++--- node/builder_chain.go | 2 +- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index eb33ac731a5..fbe3ba6bf11 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -50,6 +50,7 @@ import ( "github.com/filecoin-project/lotus/lib/ulimit" "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/testing" @@ -628,18 +629,31 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return err } + c, err := lr.Config() + if err != nil { + return err + } + cfg, ok := c.(*config.FullNode) + if !ok { + return xerrors.Errorf("invalid config for repo, got: %T", c) + } + + if cfg.ChainIndexer.DisableIndexer { + log.Info("chain indexer is disabled, not populating index from snapshot") + return nil + } + // populate the chain Index from the snapshot - // basePath, err := lr.SqlitePath() if err != nil { return err } - log.Info("populating chain index...") + log.Info("populating chain index from snapshot...") if err := index.PopulateFromSnapshot(ctx, filepath.Join(basePath, index.DefaultDbFilename), cst); err != nil { return err } - log.Info("populating chain index done") + log.Info("populating chain index from snapshot done") return nil } diff --git a/node/builder_chain.go b/node/builder_chain.go index 490bfefd703..f770ef2f449 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -238,7 +238,7 @@ func ConfigFullNode(c interface{}) Option { // If the Eth JSON-RPC is enabled, enable storing events at the ChainStore. // This is the case even if real-time and historic filtering are disabled, // as it enables us to serve logs in eth_getTransactionReceipt. 
- If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI || !cfg.ChainIndexer.DisableIndexer, Override(StoreEventsKey, modules.EnableStoringEvents)), + If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI, Override(StoreEventsKey, modules.EnableStoringEvents)), If(cfg.Wallet.RemoteBackend != "", Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), From ad6c0864bc3c6bbafb528b2675eb629bcf546c33 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Sat, 14 Sep 2024 15:33:27 +0400 Subject: [PATCH 54/66] always populate index --- cmd/lotus/daemon.go | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index fbe3ba6bf11..af8c52163fd 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -50,7 +50,6 @@ import ( "github.com/filecoin-project/lotus/lib/ulimit" "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/testing" @@ -629,20 +628,6 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return err } - c, err := lr.Config() - if err != nil { - return err - } - cfg, ok := c.(*config.FullNode) - if !ok { - return xerrors.Errorf("invalid config for repo, got: %T", c) - } - - if cfg.ChainIndexer.DisableIndexer { - log.Info("chain indexer is disabled, not populating index from snapshot") - return nil - } - // populate the chain Index from the snapshot basePath, err := lr.SqlitePath() if err != nil { From 52e104d86db5aa83f252d10150abed311bd06c23 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Sat, 14 Sep 2024 15:38:23 +0400 Subject: [PATCH 55/66] make config easy to reason about --- chain/index/indexer.go | 6 ++++++ cmd/lotus/daemon.go | 15 +++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/chain/index/indexer.go b/chain/index/indexer.go index 06766d2aa2a..cf194111ef9 100644 --- a/chain/index/indexer.go +++ b/chain/index/indexer.go @@ -3,6 +3,7 @@ package index import ( "context" "database/sql" + "errors" "sync" "github.com/ipfs/go-cid" @@ -70,6 +71,11 @@ type SqliteIndexer struct { func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64, reconcileEmptyIndex bool, maxReconcileTipsets uint64) (si *SqliteIndexer, err error) { + + if !cs.IsStoringEvents() { + return nil, errors.New("indexer can only be enabled if event storage is enabled; partial indexing is not supported for now") + } + db, err := sqlite.Open(path) if err != nil { return nil, xerrors.Errorf("failed to setup message index db: %w", err) diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index af8c52163fd..fbe3ba6bf11 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -50,6 +50,7 @@ import ( "github.com/filecoin-project/lotus/lib/ulimit" "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/testing" @@ -628,6 +629,20 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return err } + c, err := lr.Config() + if err != nil { + return err + } + cfg, ok := c.(*config.FullNode) + if !ok { + return xerrors.Errorf("invalid config for repo, got: %T", c) + } + + if 
cfg.ChainIndexer.DisableIndexer { + log.Info("chain indexer is disabled, not populating index from snapshot") + return nil + } + // populate the chain Index from the snapshot basePath, err := lr.SqlitePath() if err != nil { From 9c6c72875ba8c7d39b81a1e2f984534191f930e0 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Sat, 14 Sep 2024 17:19:35 +0400 Subject: [PATCH 56/66] fix config --- chain/index/indexer.go | 3 +-- node/builder_chain.go | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/chain/index/indexer.go b/chain/index/indexer.go index cf194111ef9..3c31f5d5778 100644 --- a/chain/index/indexer.go +++ b/chain/index/indexer.go @@ -3,7 +3,6 @@ package index import ( "context" "database/sql" - "errors" "sync" "github.com/ipfs/go-cid" @@ -73,7 +72,7 @@ func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64, recon maxReconcileTipsets uint64) (si *SqliteIndexer, err error) { if !cs.IsStoringEvents() { - return nil, errors.New("indexer can only be enabled if event storage is enabled; partial indexing is not supported for now") + log.Warn("indexer initialised with event storage disabled; please ensure that this is intentional") } db, err := sqlite.Open(path) diff --git a/node/builder_chain.go b/node/builder_chain.go index f770ef2f449..490bfefd703 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -238,7 +238,7 @@ func ConfigFullNode(c interface{}) Option { // If the Eth JSON-RPC is enabled, enable storing events at the ChainStore. // This is the case even if real-time and historic filtering are disabled, // as it enables us to serve logs in eth_getTransactionReceipt. - If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI, Override(StoreEventsKey, modules.EnableStoringEvents)), + If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI || !cfg.ChainIndexer.DisableIndexer, Override(StoreEventsKey, modules.EnableStoringEvents)), If(cfg.Wallet.RemoteBackend != "", Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), From 1da1e074580b76786247bcad82bc338e98585fb0 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Sat, 14 Sep 2024 18:55:35 +0400 Subject: [PATCH 57/66] fix messaging --- chain/index/indexer.go | 4 ---- chain/index/reconcile.go | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/chain/index/indexer.go b/chain/index/indexer.go index 3c31f5d5778..c4257527ea3 100644 --- a/chain/index/indexer.go +++ b/chain/index/indexer.go @@ -71,10 +71,6 @@ type SqliteIndexer struct { func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64, reconcileEmptyIndex bool, maxReconcileTipsets uint64) (si *SqliteIndexer, err error) { - if !cs.IsStoringEvents() { - log.Warn("indexer initialised with event storage disabled; please ensure that this is intentional") - } - db, err := sqlite.Open(path) if err != nil { return nil, xerrors.Errorf("failed to setup message index db: %w", err) diff --git a/chain/index/reconcile.go b/chain/index/reconcile.go index 828e918cb47..2a36a93db58 100644 --- a/chain/index/reconcile.go +++ b/chain/index/reconcile.go @@ -27,6 +27,10 @@ import ( // This function is crucial for maintaining index integrity, especially after chain reorgs. // It ensures that the index accurately reflects the current state of the blockchain. 
func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.TipSet) error { + if !si.cs.IsStoringEvents() { + log.Warn("chain indexer is not storing events during reconciliation; please ensure this is intentional") + } + si.closeLk.RLock() if si.closed { si.closeLk.RUnlock() From efe90f8f671706a178e7949bbb5e71e38c43c493 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Sat, 14 Sep 2024 19:29:18 +0400 Subject: [PATCH 58/66] revert config changes --- documentation/en/default-lotus-config.toml | 6 +++--- node/config/def.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index b8f744b9c1d..d856f0e65c0 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -226,7 +226,7 @@ # # type: bool # env var: LOTUS_FEVM_ENABLEETHRPC - #EnableEthRPC = false + #EnableEthRPC = true # EthTraceFilterMaxResults sets the maximum results returned per request by trace_filter # @@ -260,7 +260,7 @@ # # type: bool # env var: LOTUS_EVENTS_ENABLEACTOREVENTSAPI - #EnableActorEventsAPI = false + #EnableActorEventsAPI = true # FilterTTL specifies the time to live for actor event filters. Filters that haven't been accessed longer than # this time become eligible for automatic deletion. Filters consume resources, so if they are unused they @@ -306,7 +306,7 @@ # # type: bool # env var: LOTUS_CHAININDEXER_DISABLEINDEXER - #DisableIndexer = true + #DisableIndexer = false # GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. # The garbage collection (GC) process removes data older than this retention period. diff --git a/node/config/def.go b/node/config/def.go index 5cb69358336..7cf8a29e7a5 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -83,20 +83,20 @@ func DefaultFullNode() *FullNode { }, }, Fevm: FevmConfig{ - EnableEthRPC: false, + EnableEthRPC: true, EthTraceFilterMaxResults: 500, EthBlkCacheSize: 500, }, Events: EventsConfig{ DisableRealTimeFilterAPI: false, - EnableActorEventsAPI: false, + EnableActorEventsAPI: true, FilterTTL: Duration(time.Hour * 1), MaxFilters: 100, MaxFilterResults: 10000, MaxFilterHeightRange: 2880, // conservative limit of one day }, ChainIndexer: ChainIndexerConfig{ - DisableIndexer: true, + DisableIndexer: false, GCRetentionEpochs: 0, ReconcileEmptyIndex: false, MaxReconcileTipsets: 3 * builtin.EpochsInDay, From c121321997cf4ba785b2caa0f8443733ab4128d9 Mon Sep 17 00:00:00 2001 From: Aarsh Shah Date: Mon, 16 Sep 2024 13:05:43 +0400 Subject: [PATCH 59/66] Apply suggestions from code review Co-authored-by: Rod Vagg --- chain/index/helpers.go | 2 +- lib/sqlite/sqlite.go | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/chain/index/helpers.go b/chain/index/helpers.go index eab54256589..682020e3d82 100644 --- a/chain/index/helpers.go +++ b/chain/index/helpers.go @@ -56,7 +56,7 @@ func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error for curTs != nil { if err := si.indexTipset(ctx, tx, curTs); err != nil { if ipld.IsNotFound(err) { - log.Infof("stopping chainindex population at height %d as snapshot only contains data upto this height; error is %s", curTs.Height(), err) + log.Infof("stopping chainindex population at height %d as snapshot only contains data upto this height; error is: %s", curTs.Height(), err) break } diff --git a/lib/sqlite/sqlite.go b/lib/sqlite/sqlite.go index 15c965419d2..ffb15a7b17e 100644 --- 
a/lib/sqlite/sqlite.go +++ b/lib/sqlite/sqlite.go @@ -77,8 +77,6 @@ func Open(path string) (*sql.DB, error) { return nil, xerrors.Errorf("foreign keys are not enabled for database [@ %s]", path) } - log.Infof("Database [@ %s] opened successfully with foreign keys enabled", path) - return db, nil } From 2ff1d425acb1982d267fb3c628abd252c8cf70f2 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Mon, 16 Sep 2024 13:33:21 +0400 Subject: [PATCH 60/66] changes as per review --- documentation/en/default-lotus-config.toml | 6 +++--- node/builder_chain.go | 8 +++++--- node/config/def.go | 6 +++--- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index d856f0e65c0..b8f744b9c1d 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -226,7 +226,7 @@ # # type: bool # env var: LOTUS_FEVM_ENABLEETHRPC - #EnableEthRPC = true + #EnableEthRPC = false # EthTraceFilterMaxResults sets the maximum results returned per request by trace_filter # @@ -260,7 +260,7 @@ # # type: bool # env var: LOTUS_EVENTS_ENABLEACTOREVENTSAPI - #EnableActorEventsAPI = true + #EnableActorEventsAPI = false # FilterTTL specifies the time to live for actor event filters. Filters that haven't been accessed longer than # this time become eligible for automatic deletion. Filters consume resources, so if they are unused they @@ -306,7 +306,7 @@ # # type: bool # env var: LOTUS_CHAININDEXER_DISABLEINDEXER - #DisableIndexer = false + #DisableIndexer = true # GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. # The garbage collection (GC) process removes data older than this retention period. diff --git a/node/builder_chain.go b/node/builder_chain.go index 490bfefd703..d669c312aab 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -282,9 +282,11 @@ func ConfigFullNode(c interface{}) Option { Override(ConsensusReporterKey, modules.RunConsensusFaultReporter(cfg.FaultReporter)), ), - Override(new(index.Indexer), modules.ChainIndexer(cfg.ChainIndexer)), - If(!cfg.ChainIndexer.DisableIndexer, - Override(InitChainIndexerKey, modules.InitChainIndexer), + ApplyIf(isFullNode, + Override(new(index.Indexer), modules.ChainIndexer(cfg.ChainIndexer)), + If(!cfg.ChainIndexer.DisableIndexer, + Override(InitChainIndexerKey, modules.InitChainIndexer), + ), ), ) } diff --git a/node/config/def.go b/node/config/def.go index 7cf8a29e7a5..5cb69358336 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -83,20 +83,20 @@ func DefaultFullNode() *FullNode { }, }, Fevm: FevmConfig{ - EnableEthRPC: true, + EnableEthRPC: false, EthTraceFilterMaxResults: 500, EthBlkCacheSize: 500, }, Events: EventsConfig{ DisableRealTimeFilterAPI: false, - EnableActorEventsAPI: true, + EnableActorEventsAPI: false, FilterTTL: Duration(time.Hour * 1), MaxFilters: 100, MaxFilterResults: 10000, MaxFilterHeightRange: 2880, // conservative limit of one day }, ChainIndexer: ChainIndexerConfig{ - DisableIndexer: false, + DisableIndexer: true, GCRetentionEpochs: 0, ReconcileEmptyIndex: false, MaxReconcileTipsets: 3 * builtin.EpochsInDay, From c945bb58a1c5b5b24508912af9bfcbffba0a33b8 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Mon, 16 Sep 2024 14:23:57 +0400 Subject: [PATCH 61/66] make error messages homogenous --- chain/index/events.go | 10 +++++----- chain/index/indexer.go | 18 +++++++++--------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git 
a/chain/index/events.go b/chain/index/events.go index 6000ef348a8..9261fb5acc4 100644 --- a/chain/index/events.go +++ b/chain/index/events.go @@ -127,14 +127,14 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ func (si *SqliteIndexer) loadExecutedMessages(ctx context.Context, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { msgs, err := si.cs.MessagesForTipset(ctx, msgTs) if err != nil { - return nil, xerrors.Errorf("error getting messages for tipset: %w", err) + return nil, xerrors.Errorf("failed to get messages for tipset: %w", err) } st := si.cs.ActorStore(ctx) receiptsArr, err := blockadt.AsArray(st, rctTs.Blocks()[0].ParentMessageReceipts) if err != nil { - return nil, xerrors.Errorf("error loading message receipts array: %w", err) + return nil, xerrors.Errorf("failed to load message receipts array: %w", err) } if uint64(len(msgs)) != receiptsArr.Length() { @@ -149,7 +149,7 @@ func (si *SqliteIndexer) loadExecutedMessages(ctx context.Context, msgTs, rctTs var rct types.MessageReceipt found, err := receiptsArr.Get(uint64(i), &rct) if err != nil { - return nil, xerrors.Errorf("error loading receipt %d: %w", i, err) + return nil, xerrors.Errorf("failed to load receipt %d: %w", i, err) } if !found { return nil, xerrors.Errorf("receipt %d not found", i) @@ -163,7 +163,7 @@ func (si *SqliteIndexer) loadExecutedMessages(ctx context.Context, msgTs, rctTs eventsArr, err := amt4.LoadAMT(ctx, st, *rct.EventsRoot, amt4.UseTreeBitWidth(types.EventAMTBitwidth)) if err != nil { - return nil, xerrors.Errorf("error loading events amt: %w", err) + return nil, xerrors.Errorf("failed to load events amt: %w", err) } ems[i].evs = make([]types.Event, eventsArr.Len()) @@ -182,7 +182,7 @@ func (si *SqliteIndexer) loadExecutedMessages(ctx context.Context, msgTs, rctTs }) if err != nil { - return nil, xerrors.Errorf("error iterating over events for message %d: %w", i, err) + return nil, xerrors.Errorf("failed to iterate over events for message %d: %w", i, err) } } diff --git a/chain/index/indexer.go b/chain/index/indexer.go index c4257527ea3..4e0157e213c 100644 --- a/chain/index/indexer.go +++ b/chain/index/indexer.go @@ -195,12 +195,12 @@ func (si *SqliteIndexer) IndexSignedMessage(ctx context.Context, msg *types.Sign func (si *SqliteIndexer) indexSignedMessage(ctx context.Context, tx *sql.Tx, msg *types.SignedMessage) error { ethTx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(msg) if err != nil { - return xerrors.Errorf("error converting filecoin message to eth tx: %w", err) + return xerrors.Errorf("failed to convert filecoin message to eth tx: %w", err) } txHash, err := ethTx.TxHash() if err != nil { - return xerrors.Errorf("error hashing transaction: %w", err) + return xerrors.Errorf("failed to hash transaction: %w", err) } return si.indexEthTxHash(ctx, tx, txHash, msg.Cid()) @@ -218,14 +218,14 @@ func (si *SqliteIndexer) Apply(ctx context.Context, from, to *types.TipSet) erro // Height(to) > Height(from) err := withTx(ctx, si.db, func(tx *sql.Tx) error { if err := si.indexTipsetWithParentEvents(ctx, tx, from, to); err != nil { - return xerrors.Errorf("error indexing tipset: %w", err) + return xerrors.Errorf("failed to index tipset: %w", err) } return nil }) if err != nil { - return xerrors.Errorf("error applying tipset: %w", err) + return xerrors.Errorf("failed to apply tipset: %w", err) } si.notifyUpdateSubs() @@ -336,31 +336,31 @@ func (si *SqliteIndexer) Revert(ctx context.Context, from, to *types.TipSet) err revertTsKeyCid, err := 
toTipsetKeyCidBytes(from) if err != nil { - return xerrors.Errorf("error getting tipset key cid: %w", err) + return xerrors.Errorf("failed to get tipset key cid: %w", err) } // Because of deferred execution in Filecoin, events at tipset T are reverted when a tipset T+1 is reverted. // However, the tipet `T` itself is not reverted. eventTsKeyCid, err := toTipsetKeyCidBytes(to) if err != nil { - return xerrors.Errorf("error getting tipset key cid: %w", err) + return xerrors.Errorf("failed to get tipset key cid: %w", err) } err = withTx(ctx, si.db, func(tx *sql.Tx) error { if _, err := tx.Stmt(si.stmts.updateTipsetToRevertedStmt).ExecContext(ctx, revertTsKeyCid); err != nil { - return xerrors.Errorf("error marking tipset %s as reverted: %w", revertTsKeyCid, err) + return xerrors.Errorf("failed to mark tipset %s as reverted: %w", revertTsKeyCid, err) } // events are indexed against the message inclusion tipset, not the message execution tipset. // So we need to revert the events for the message inclusion tipset. if _, err := tx.Stmt(si.stmts.updateEventsToRevertedStmt).ExecContext(ctx, eventTsKeyCid); err != nil { - return xerrors.Errorf("error reverting events for tipset %s: %w", eventTsKeyCid, err) + return xerrors.Errorf("failed to revert events for tipset %s: %w", eventTsKeyCid, err) } return nil }) if err != nil { - return xerrors.Errorf("error during revert transaction: %w", err) + return xerrors.Errorf("failed during revert transaction: %w", err) } si.notifyUpdateSubs() From 432e09a637a473623e073f9a672742a63dd11eb6 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Mon, 16 Sep 2024 14:30:01 +0400 Subject: [PATCH 62/66] fix indentation --- chain/store/store_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 4df171d2fa7..81569149c01 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -215,8 +215,7 @@ func TestChainExportImportFull(t *testing.T) { t.Fatal("imported chain differed from exported chain") } - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), - ds, nil) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds, nil) if err != nil { t.Fatal(err) } From af9bc2338db5dc7bba80d91eddef22643d26a4cd Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Mon, 16 Sep 2024 14:46:41 +0400 Subject: [PATCH 63/66] changes as per review --- chain/index/events.go | 24 --------------------- cmd/lotus/daemon.go | 2 +- documentation/en/default-lotus-config.toml | 13 ++++++----- itests/direct_data_onboard_verified_test.go | 2 +- itests/kit/node_opts.go | 2 +- node/builder_chain.go | 4 ++-- node/config/def.go | 2 +- node/config/doc_gen.go | 11 +++++----- node/config/types.go | 11 +++++----- node/modules/chainindex.go | 2 +- 10 files changed, 23 insertions(+), 50 deletions(-) diff --git a/chain/index/events.go b/chain/index/events.go index 9261fb5acc4..6326e70615f 100644 --- a/chain/index/events.go +++ b/chain/index/events.go @@ -369,10 +369,6 @@ func (si *SqliteIndexer) GetEventsForFilter(ctx context.Context, f *EventFilter, return ces, nil } - if err := si.sanityCheckFilter(ctx, f); err != nil { - return nil, xerrors.Errorf("event filter is invalid: %w", err) - } - values, query := makePrefillFilterQuery(f, excludeReverted) stmt, err := si.db.Prepare(query) @@ -403,26 +399,6 @@ func (si *SqliteIndexer) 
GetEventsForFilter(ctx context.Context, f *EventFilter, return ces, nil } -func (si *SqliteIndexer) sanityCheckFilter(ctx context.Context, f *EventFilter) error { - head := si.cs.GetHeaviestTipSet() - - if f.TipsetCid != cid.Undef { - ts, err := si.cs.GetTipSetByCid(ctx, f.TipsetCid) - if err != nil { - return xerrors.Errorf("failed to get tipset by cid: %w", err) - } - if ts.Height() >= head.Height() { - return xerrors.New("cannot ask for events for a tipset >= head") - } - } - - if f.MinHeight >= head.Height() || f.MaxHeight >= head.Height() { - return xerrors.New("cannot ask for events for a tipset >= head") - } - - return nil -} - func makePrefillFilterQuery(f *EventFilter, excludeReverted bool) ([]any, string) { clauses := []string{} values := []any{} diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index fbe3ba6bf11..5dcf124c39b 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -638,7 +638,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return xerrors.Errorf("invalid config for repo, got: %T", c) } - if cfg.ChainIndexer.DisableIndexer { + if !cfg.ChainIndexer.EnableIndexer { log.Info("chain indexer is disabled, not populating index from snapshot") return nil } diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index b8f744b9c1d..0a1247c95b8 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -294,19 +294,18 @@ [ChainIndexer] - # DisableIndexer controls whether the chain indexer is active. + # EnableIndexer controls whether the chain indexer is active. # The chain indexer is responsible for indexing tipsets, messages, and events from the chain state. # It is a crucial component for optimizing Lotus RPC response times. # - # Default: false (indexer is enabled) + # Default: false (indexer is disabled) # - # Setting this to true will disable the indexer, which may significantly impact RPC performance. - # It is strongly recommended to keep this set to false unless you have a specific reason to disable it - # and fully understand the implications. + # Setting this to true will enable the indexer, which will significantly improve RPC performance. + # It is strongly recommended to keep this set to true if you are an RPC provider. # # type: bool - # env var: LOTUS_CHAININDEXER_DISABLEINDEXER - #DisableIndexer = true + # env var: LOTUS_CHAININDEXER_ENABLEINDEXER + #EnableIndexer = false # GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. # The garbage collection (GC) process removes data older than this retention period. 
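As a rough illustration of the renamed option (this snippet is not part of the patch itself, and the numeric values are placeholders): an RPC provider opting in would end up with a ChainIndexer section along these lines in config.toml, bearing in mind that a later patch in this series requires a non-zero GCRetentionEpochs to be greater than builtin.EpochsInDay (2880 epochs on mainnet):

[ChainIndexer]
EnableIndexer = true
# retain roughly two days of indexed data; must be 0 (GC disabled) or > 2880
GCRetentionEpochs = 5760
ReconcileEmptyIndex = true
MaxReconcileTipsets = 8640
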
diff --git a/itests/direct_data_onboard_verified_test.go b/itests/direct_data_onboard_verified_test.go index 854a3b75c40..8d3d9fef78c 100644 --- a/itests/direct_data_onboard_verified_test.go +++ b/itests/direct_data_onboard_verified_test.go @@ -135,7 +135,7 @@ func TestOnboardRawPieceVerified_WithActorEvents(t *testing.T) { // subscribe to actor events up until the current head initialEventsChan, err := miner.FullNode.SubscribeActorEventsRaw(ctx, &types.ActorEventFilter{ FromHeight: epochPtr(0), - ToHeight: epochPtr(int64(head.Height()) - 1), + ToHeight: epochPtr(int64(head.Height())), }) require.NoError(t, err) diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go index 54fe12442d9..ed7bb03ea16 100644 --- a/itests/kit/node_opts.go +++ b/itests/kit/node_opts.go @@ -65,7 +65,7 @@ var DefaultNodeOpts = nodeOpts{ // test defaults cfg.Fevm.EnableEthRPC = true - cfg.ChainIndexer.DisableIndexer = false + cfg.ChainIndexer.EnableIndexer = true cfg.Events.MaxFilterHeightRange = math.MaxInt64 cfg.Events.EnableActorEventsAPI = true cfg.ChainIndexer.ReconcileEmptyIndex = true diff --git a/node/builder_chain.go b/node/builder_chain.go index d669c312aab..0f0c7102e6b 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -238,7 +238,7 @@ func ConfigFullNode(c interface{}) Option { // If the Eth JSON-RPC is enabled, enable storing events at the ChainStore. // This is the case even if real-time and historic filtering are disabled, // as it enables us to serve logs in eth_getTransactionReceipt. - If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI || !cfg.ChainIndexer.DisableIndexer, Override(StoreEventsKey, modules.EnableStoringEvents)), + If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI || cfg.ChainIndexer.EnableIndexer, Override(StoreEventsKey, modules.EnableStoringEvents)), If(cfg.Wallet.RemoteBackend != "", Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), @@ -284,7 +284,7 @@ func ConfigFullNode(c interface{}) Option { ApplyIf(isFullNode, Override(new(index.Indexer), modules.ChainIndexer(cfg.ChainIndexer)), - If(!cfg.ChainIndexer.DisableIndexer, + If(cfg.ChainIndexer.EnableIndexer, Override(InitChainIndexerKey, modules.InitChainIndexer), ), ), diff --git a/node/config/def.go b/node/config/def.go index 5cb69358336..1a2b4d85776 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -96,7 +96,7 @@ func DefaultFullNode() *FullNode { MaxFilterHeightRange: 2880, // conservative limit of one day }, ChainIndexer: ChainIndexerConfig{ - DisableIndexer: true, + EnableIndexer: false, GCRetentionEpochs: 0, ReconcileEmptyIndex: false, MaxReconcileTipsets: 3 * builtin.EpochsInDay, diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 914f3e72274..534a6fb4abb 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -73,18 +73,17 @@ your node if metadata log is disabled`, }, "ChainIndexerConfig": { { - Name: "DisableIndexer", + Name: "EnableIndexer", Type: "bool", - Comment: `DisableIndexer controls whether the chain indexer is active. + Comment: `EnableIndexer controls whether the chain indexer is active. The chain indexer is responsible for indexing tipsets, messages, and events from the chain state. It is a crucial component for optimizing Lotus RPC response times. -Default: false (indexer is enabled) +Default: false (indexer is disabled) -Setting this to true will disable the indexer, which may significantly impact RPC performance. 
-It is strongly recommended to keep this set to false unless you have a specific reason to disable it -and fully understand the implications.`, +Setting this to true will enable the indexer, which will significantly improve RPC performance. +It is strongly recommended to keep this set to true if you are an RPC provider.`, }, { Name: "GCRetentionEpochs", diff --git a/node/config/types.go b/node/config/types.go index 126cd7820c3..78bc3fd13fb 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -614,16 +614,15 @@ type EventsConfig struct { } type ChainIndexerConfig struct { - // DisableIndexer controls whether the chain indexer is active. + // EnableIndexer controls whether the chain indexer is active. // The chain indexer is responsible for indexing tipsets, messages, and events from the chain state. // It is a crucial component for optimizing Lotus RPC response times. // - // Default: false (indexer is enabled) + // Default: false (indexer is disabled) // - // Setting this to true will disable the indexer, which may significantly impact RPC performance. - // It is strongly recommended to keep this set to false unless you have a specific reason to disable it - // and fully understand the implications. - DisableIndexer bool + // Setting this to true will enable the indexer, which will significantly improve RPC performance. + // It is strongly recommended to keep this set to true if you are an RPC provider. + EnableIndexer bool // GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. // The garbage collection (GC) process removes data older than this retention period. diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index 3b8077c072b..ee3decafec4 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -23,7 +23,7 @@ import ( func ChainIndexer(cfg config.ChainIndexerConfig) func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (index.Indexer, error) { return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (index.Indexer, error) { - if cfg.DisableIndexer { + if !cfg.EnableIndexer { log.Infof("ChainIndexer is disabled") return nil, nil } From 6d84b03691dab375374e00e0d4a8f84a1d3e40a4 Mon Sep 17 00:00:00 2001 From: Aarsh Shah Date: Mon, 16 Sep 2024 14:54:29 +0400 Subject: [PATCH 64/66] feat: recompute tipset to generate missing events if event indexing is enabled (#12463) * auto repair events * make jen * fix leaky abstraction --- chain/index/events.go | 16 +++++++++++++++- chain/index/indexer.go | 8 +++++++- chain/index/interface.go | 1 + node/modules/chainindex.go | 5 +++++ 4 files changed, 28 insertions(+), 2 deletions(-) diff --git a/chain/index/events.go b/chain/index/events.go index 6326e70615f..1f4445eee08 100644 --- a/chain/index/events.go +++ b/chain/index/events.go @@ -163,7 +163,21 @@ func (si *SqliteIndexer) loadExecutedMessages(ctx context.Context, msgTs, rctTs eventsArr, err := amt4.LoadAMT(ctx, st, *rct.EventsRoot, amt4.UseTreeBitWidth(types.EventAMTBitwidth)) if err != nil { - return nil, xerrors.Errorf("failed to load events amt: %w", err) + if si.recomputeTipSetStateFunc == nil { + return nil, xerrors.Errorf("failed to load events amt for message %s: %w", ems[i].msg.Cid(), err) + } + log.Warnf("failed to load events amt for message %s: %s; recomputing tipset state to regenerate events", ems[i].msg.Cid(), err) + + if err := si.recomputeTipSetStateFunc(ctx, msgTs); err != nil { + return nil, xerrors.Errorf("failed to 
recompute missing events; failed to recompute tipset state: %w", err) + } + + eventsArr, err = amt4.LoadAMT(ctx, st, *rct.EventsRoot, amt4.UseTreeBitWidth(types.EventAMTBitwidth)) + if err != nil { + return nil, xerrors.Errorf("failed to load events amt for message %s: %w", ems[i].msg.Cid(), err) + } + + log.Infof("successfully recomputed tipset state and loaded events amt for message %s", ems[i].msg.Cid()) } ems[i].evs = make([]types.Event, eventsArr.Len()) diff --git a/chain/index/indexer.go b/chain/index/indexer.go index 4e0157e213c..d341cd19757 100644 --- a/chain/index/indexer.go +++ b/chain/index/indexer.go @@ -21,6 +21,7 @@ var _ Indexer = (*SqliteIndexer)(nil) // IdToRobustAddrFunc is a function type that resolves an actor ID to a robust address type IdToRobustAddrFunc func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) +type recomputeTipSetStateFunc func(ctx context.Context, ts *types.TipSet) error type preparedStatements struct { insertEthTxHashStmt *sql.Stmt @@ -52,7 +53,8 @@ type SqliteIndexer struct { db *sql.DB cs ChainStore - idToRobustAddrFunc IdToRobustAddrFunc + idToRobustAddrFunc IdToRobustAddrFunc + recomputeTipSetStateFunc recomputeTipSetStateFunc stmts *preparedStatements @@ -120,6 +122,10 @@ func (si *SqliteIndexer) SetIdToRobustAddrFunc(idToRobustAddrFunc IdToRobustAddr si.idToRobustAddrFunc = idToRobustAddrFunc } +func (si *SqliteIndexer) SetRecomputeTipSetStateFunc(recomputeTipSetStateFunc recomputeTipSetStateFunc) { + si.recomputeTipSetStateFunc = recomputeTipSetStateFunc +} + func (si *SqliteIndexer) Close() error { si.closeLk.Lock() defer si.closeLk.Unlock() diff --git a/chain/index/interface.go b/chain/index/interface.go index 8e695d95d36..67b43f3be4f 100644 --- a/chain/index/interface.go +++ b/chain/index/interface.go @@ -56,6 +56,7 @@ type Indexer interface { IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, c cid.Cid) error SetIdToRobustAddrFunc(idToRobustAddrFunc IdToRobustAddrFunc) + SetRecomputeTipSetStateFunc(recomputeTipSetStateFunc recomputeTipSetStateFunc) Apply(ctx context.Context, from, to *types.TipSet) error Revert(ctx context.Context, from, to *types.TipSet) error diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go index ee3decafec4..e0a44ed1fe4 100644 --- a/node/modules/chainindex.go +++ b/node/modules/chainindex.go @@ -70,6 +70,11 @@ func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer index.In return *actor.DelegatedAddress, true }) + indexer.SetRecomputeTipSetStateFunc(func(ctx context.Context, ts *types.TipSet) error { + _, _, err := sm.RecomputeTipSetState(ctx, ts) + return err + }) + ch, err := mp.Updates(ctx) if err != nil { return err From b9f1583bc3d7ed8f86f52573f4a802f8ed4bf261 Mon Sep 17 00:00:00 2001 From: aarshkshah1992 Date: Mon, 16 Sep 2024 17:20:11 +0400 Subject: [PATCH 65/66] better docs for gc retention epoch --- chain/index/indexer.go | 5 +++++ documentation/en/default-lotus-config.toml | 3 +++ node/config/doc_gen.go | 3 +++ node/config/types.go | 3 +++ 4 files changed, 14 insertions(+) diff --git a/chain/index/indexer.go b/chain/index/indexer.go index d341cd19757..bb237314bdb 100644 --- a/chain/index/indexer.go +++ b/chain/index/indexer.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" 
"github.com/filecoin-project/lotus/lib/sqlite" @@ -73,6 +74,10 @@ type SqliteIndexer struct { func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64, reconcileEmptyIndex bool, maxReconcileTipsets uint64) (si *SqliteIndexer, err error) { + if gcRetentionEpochs != 0 && gcRetentionEpochs <= builtin.EpochsInDay { + return nil, xerrors.Errorf("gc retention epochs must be 0 or greater than %d", builtin.EpochsInDay) + } + db, err := sqlite.Open(path) if err != nil { return nil, xerrors.Errorf("failed to setup message index db: %w", err) diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 0a1247c95b8..a821f73a5bd 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -311,6 +311,9 @@ # The garbage collection (GC) process removes data older than this retention period. # Setting this to 0 disables GC, preserving all historical data indefinitely. # + # If set, the minimum value must be greater than builtin.EpochsInDay (i.e. "2880" epochs for mainnet). + # This ensures a reasonable retention period for the indexed data. + # # Default: 0 (GC disabled) # # type: int64 diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 534a6fb4abb..209e21bb560 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -93,6 +93,9 @@ It is strongly recommended to keep this set to true if you are an RPC provider.` The garbage collection (GC) process removes data older than this retention period. Setting this to 0 disables GC, preserving all historical data indefinitely. +If set, the minimum value must be greater than builtin.EpochsInDay (i.e. "2880" epochs for mainnet). +This ensures a reasonable retention period for the indexed data. + Default: 0 (GC disabled)`, }, { diff --git a/node/config/types.go b/node/config/types.go index 78bc3fd13fb..872dd9f95e0 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -628,6 +628,9 @@ type ChainIndexerConfig struct { // The garbage collection (GC) process removes data older than this retention period. // Setting this to 0 disables GC, preserving all historical data indefinitely. // + // If set, the minimum value must be greater than builtin.EpochsInDay (i.e. "2880" epochs for mainnet). + // This ensures a reasonable retention period for the indexed data. + // // Default: 0 (GC disabled) GCRetentionEpochs int64 From 1921abdcd3c092260d581bc377054f646c360045 Mon Sep 17 00:00:00 2001 From: Aarsh Shah Date: Thu, 19 Sep 2024 13:14:07 +0400 Subject: [PATCH 66/66] imrpove DB handling (#12485) --- chain/index/ddls.go | 6 +++--- chain/index/events.go | 6 +++--- chain/index/gc.go | 2 +- chain/index/reconcile.go | 42 +++++++++++++++++++++++++--------------- 4 files changed, 33 insertions(+), 23 deletions(-) diff --git a/chain/index/ddls.go b/chain/index/ddls.go index c7002fde6b5..7c9f3fce0cc 100644 --- a/chain/index/ddls.go +++ b/chain/index/ddls.go @@ -59,8 +59,8 @@ var ddls = []string{ // the preparedStatements struct. func preparedStatementMapping(ps *preparedStatements) map[**sql.Stmt]string { return map[**sql.Stmt]string{ - &ps.getNonRevertedMsgInfoStmt: "SELECT tipset_key_cid, height FROM tipset_message WHERE message_cid = ? AND reverted = 0", - &ps.getMsgCidFromEthHashStmt: "SELECT message_cid FROM eth_tx_hash WHERE tx_hash = ?", + &ps.getNonRevertedMsgInfoStmt: "SELECT tipset_key_cid, height FROM tipset_message WHERE message_cid = ? 
AND reverted = 0 LIMIT 1", + &ps.getMsgCidFromEthHashStmt: "SELECT message_cid FROM eth_tx_hash WHERE tx_hash = ? LIMIT 1", &ps.insertEthTxHashStmt: "INSERT INTO eth_tx_hash (tx_hash, message_cid) VALUES (?, ?) ON CONFLICT (tx_hash) DO UPDATE SET inserted_at = CURRENT_TIMESTAMP", &ps.insertTipsetMessageStmt: "INSERT INTO tipset_message (tipset_key_cid, height, reverted, message_cid, message_index) VALUES (?, ?, ?, ?, ?) ON CONFLICT (tipset_key_cid, message_cid) DO UPDATE SET reverted = 0", &ps.hasTipsetStmt: "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ?)", @@ -75,7 +75,7 @@ func preparedStatementMapping(ps *preparedStatements) map[**sql.Stmt]string { &ps.hasNonRevertedTipsetStmt: "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ? AND reverted = 0)", &ps.updateEventsToRevertedStmt: "UPDATE event SET reverted = 1 WHERE message_id IN (SELECT message_id FROM tipset_message WHERE tipset_key_cid = ?)", &ps.updateEventsToNonRevertedStmt: "UPDATE event SET reverted = 0 WHERE message_id IN (SELECT message_id FROM tipset_message WHERE tipset_key_cid = ?)", - &ps.getMsgIdForMsgCidAndTipsetStmt: "SELECT message_id FROM tipset_message WHERE tipset_key_cid = ? AND message_cid = ? AND reverted = 0", + &ps.getMsgIdForMsgCidAndTipsetStmt: "SELECT message_id FROM tipset_message WHERE tipset_key_cid = ? AND message_cid = ? AND reverted = 0 LIMIT 1", &ps.insertEventStmt: "INSERT INTO event (message_id, event_index, emitter_addr, reverted) VALUES (?, ?, ?, ?)", &ps.insertEventEntryStmt: "INSERT INTO event_entry (event_id, indexed, flags, key, codec, value) VALUES (?, ?, ?, ?, ?, ?)", } diff --git a/chain/index/events.go b/chain/index/events.go index 1f4445eee08..d3badeeeff8 100644 --- a/chain/index/events.go +++ b/chain/index/events.go @@ -71,7 +71,7 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ // read message id for this message cid and tipset key cid var messageID int64 - if err := tx.Stmt(si.stmts.getMsgIdForMsgCidAndTipsetStmt).QueryRow(msgTsKeyCidBytes, msgCidBytes).Scan(&messageID); err != nil { + if err := tx.Stmt(si.stmts.getMsgIdForMsgCidAndTipsetStmt).QueryRowContext(ctx, msgTsKeyCidBytes, msgCidBytes).Scan(&messageID); err != nil { return xerrors.Errorf("failed to get message id for message cid and tipset key cid: %w", err) } if messageID == 0 { @@ -92,7 +92,7 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ } // Insert event into events table - eventResult, err := tx.Stmt(si.stmts.insertEventStmt).Exec(messageID, eventCount, addr.Bytes(), 0) + eventResult, err := tx.Stmt(si.stmts.insertEventStmt).ExecContext(ctx, messageID, eventCount, addr.Bytes(), 0) if err != nil { return xerrors.Errorf("failed to insert event: %w", err) } @@ -105,7 +105,7 @@ func (si *SqliteIndexer) indexEvents(ctx context.Context, tx *sql.Tx, msgTs *typ // Insert event entries for _, entry := range event.Entries { - _, err := tx.Stmt(si.stmts.insertEventEntryStmt).Exec( + _, err := tx.Stmt(si.stmts.insertEventEntryStmt).ExecContext(ctx, eventID, isIndexedValue(entry.Flags), []byte{entry.Flags}, diff --git a/chain/index/gc.go b/chain/index/gc.go index 168b59507e9..1e643b35e9d 100644 --- a/chain/index/gc.go +++ b/chain/index/gc.go @@ -83,7 +83,7 @@ func (si *SqliteIndexer) gc(ctx context.Context) { } log.Infof("gc'ing eth hashes older than %d days", gcRetentionDays) - res, err = si.stmts.removeEthHashesOlderThanStmt.Exec("-" + strconv.Itoa(int(gcRetentionDays)) + " day") + res, err = 
si.stmts.removeEthHashesOlderThanStmt.ExecContext(ctx, "-"+strconv.Itoa(int(gcRetentionDays))+" day") if err != nil { log.Errorf("failed to gc eth hashes older than %d days: %w", gcRetentionDays, err) return diff --git a/chain/index/reconcile.go b/chain/index/reconcile.go index 2a36a93db58..a139ff79852 100644 --- a/chain/index/reconcile.go +++ b/chain/index/reconcile.go @@ -60,22 +60,9 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip return si.backfillIndex(ctx, tx, head, 0) } - // Find the minimum applied tipset in the index; this will mark the absolute min height of the reconciliation walk - var reconciliationEpochInIndex sql.NullInt64 - var reconciliationEpoch abi.ChainEpoch - - row := tx.StmtContext(ctx, si.stmts.getMinNonRevertedHeightStmt).QueryRowContext(ctx) - if err := row.Scan(&reconciliationEpochInIndex); err != nil { - if err != sql.ErrNoRows { - return xerrors.Errorf("failed to scan minimum non-reverted height: %w", err) - } - log.Warn("index only contains reverted tipsets; setting reconciliation epoch to 0") - reconciliationEpoch = 0 - } else if !reconciliationEpochInIndex.Valid { - log.Warn("index only contains reverted tipsets; setting reconciliation epoch to 0") - reconciliationEpoch = 0 - } else { - reconciliationEpoch = abi.ChainEpoch(reconciliationEpochInIndex.Int64) + reconciliationEpoch, err := si.getReconciliationEpoch(ctx, tx) + if err != nil { + return xerrors.Errorf("failed to get reconciliation epoch: %w", err) } currTs := head @@ -161,6 +148,29 @@ func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.Tip }) } +func (si *SqliteIndexer) getReconciliationEpoch(ctx context.Context, tx *sql.Tx) (abi.ChainEpoch, error) { + var reconciliationEpochInIndex sql.NullInt64 + + err := tx.StmtContext(ctx, si.stmts.getMinNonRevertedHeightStmt). + QueryRowContext(ctx). + Scan(&reconciliationEpochInIndex) + + if err != nil { + if err == sql.ErrNoRows { + log.Warn("index only contains reverted tipsets; setting reconciliation epoch to 0") + return 0, nil + } + return 0, xerrors.Errorf("failed to scan minimum non-reverted height: %w", err) + } + + if !reconciliationEpochInIndex.Valid { + log.Warn("index only contains reverted tipsets; setting reconciliation epoch to 0") + return 0, nil + } + + return abi.ChainEpoch(reconciliationEpochInIndex.Int64), nil +} + // backfillIndex backfills the chain index with missing tipsets starting from the given head tipset // and stopping after the specified stopAfter epoch (inclusive). //