diff --git a/chain/indexer.go b/chain/indexer.go index 08e3b01a7..9e594bd56 100644 --- a/chain/indexer.go +++ b/chain/indexer.go @@ -47,7 +47,7 @@ const ( MultisigApprovalsTask = "msapprovals" // task that extracts multisig actor approvals ) -var log = logging.Logger("chain") +var log = logging.Logger("visor/chain") var _ TipSetObserver = (*TipSetIndexer)(nil) diff --git a/chain/walker_test.go b/chain/walker_test.go index 0ef79da87..656a7019f 100644 --- a/chain/walker_test.go +++ b/chain/walker_test.go @@ -6,7 +6,6 @@ import ( "time" "github.com/filecoin-project/sentinel-visor/chain/actors/builtin" - "github.com/raulk/clock" apitest "github.com/filecoin-project/lotus/api/test" nodetest "github.com/filecoin-project/lotus/node/test" @@ -55,11 +54,8 @@ func TestWalker(t *testing.T) { cids := bhs.Cids() rounds := bhs.Rounds() - strg := &storage.Database{ - DB: db, - Clock: clock.NewMock(), - Upsert: false, - } + strg, err := storage.NewDatabaseFromDB(ctx, db, "public") + require.NoError(t, err, "NewDatabaseFromDB") tsIndexer, err := NewTipSetIndexer(opener, strg, builtin.EpochDurationSeconds*time.Second, t.Name(), []string{BlocksTask}) require.NoError(t, err, "NewTipSetIndexer") diff --git a/chain/watcher_test.go b/chain/watcher_test.go index 5e6c5b94e..d5c1881ba 100644 --- a/chain/watcher_test.go +++ b/chain/watcher_test.go @@ -7,7 +7,6 @@ import ( "time" "github.com/filecoin-project/sentinel-visor/chain/actors/builtin" - "github.com/raulk/clock" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -65,11 +64,8 @@ func TestWatcher(t *testing.T) { apitest.MineUntilBlock(ctx, t, node, sn[0], nil) - strg := &storage.Database{ - DB: db, - Clock: clock.NewMock(), - Upsert: false, - } + strg, err := storage.NewDatabaseFromDB(ctx, db, "public") + require.NoError(t, err, "NewDatabaseFromDB") tsIndexer, err := NewTipSetIndexer(opener, strg, builtin.EpochDurationSeconds*time.Second, t.Name(), []string{BlocksTask}) require.NoError(t, err, "NewTipSetIndexer") diff --git a/commands/migrate.go b/commands/migrate.go index d3dc0c24e..36f207567 100644 --- a/commands/migrate.go +++ b/commands/migrate.go @@ -33,7 +33,7 @@ var MigrateCmd = &cli.Command{ ctx := cctx.Context - db, err := storage.NewDatabase(ctx, cctx.String("db"), cctx.Int("db-pool-size"), cctx.String("name"), false) + db, err := storage.NewDatabase(ctx, cctx.String("db"), cctx.Int("db-pool-size"), cctx.String("name"), cctx.String("schema"), false) if err != nil { return xerrors.Errorf("connect database: %w", err) } @@ -62,6 +62,7 @@ var MigrateCmd = &cli.Command{ return xerrors.Errorf("verify schema: %w", err) } + log.Infof("database schema is supported by this version of visor") return nil }, } diff --git a/commands/run.go b/commands/run.go index b6ab64fe5..d86e4c722 100644 --- a/commands/run.go +++ b/commands/run.go @@ -46,6 +46,12 @@ var dbConnectFlags = []cli.Flag{ Value: defaultName, Usage: "A name that helps to identify this instance of visor.", }, + &cli.StringFlag{ + Name: "schema", + EnvVars: []string{"VISOR_SCHEMA"}, + Value: "public", + Usage: "The name of the postgresql schema that holds the objects used by this instance of visor.", + }, } var dbBehaviourFlags = []cli.Flag{ diff --git a/commands/setup.go b/commands/setup.go index fb5eea524..dc359dcad 100644 --- a/commands/setup.go +++ b/commands/setup.go @@ -38,7 +38,7 @@ var log = logging.Logger("visor") func setupDatabase(cctx *cli.Context) (*storage.Database, error) { ctx := cctx.Context - db, err := storage.NewDatabase(ctx, 
cctx.String("db"), cctx.Int("db-pool-size"), cctx.String("name"), cctx.Bool("db-allow-upsert")) + db, err := storage.NewDatabase(ctx, cctx.String("db"), cctx.Int("db-pool-size"), cctx.String("name"), cctx.String("schema"), cctx.Bool("db-allow-upsert")) if err != nil { return nil, xerrors.Errorf("new database: %w", err) } diff --git a/config/config.go b/config/config.go index d111c2b5f..e1b3d2f5d 100644 --- a/config/config.go +++ b/config/config.go @@ -31,6 +31,7 @@ type PgStorageConf struct { URLEnv string // name of an environment variable that contains the database URL URL string // URL used to connect to postgresql if URLEnv is not set ApplicationName string + SchemaName string PoolSize int AllowUpsert bool } @@ -83,6 +84,7 @@ func SampleConf() *Conf { PoolSize: 20, ApplicationName: "visor", AllowUpsert: false, + SchemaName: "public", }, // this second database is only here to give an example to the user "Database2": { @@ -90,6 +92,7 @@ func SampleConf() *Conf { PoolSize: 10, ApplicationName: "visor", AllowUpsert: false, + SchemaName: "public", }, }, diff --git a/model/actors/init/idaddress.go b/model/actors/init/idaddress.go index c4abe441d..8893e33fe 100644 --- a/model/actors/init/idaddress.go +++ b/model/actors/init/idaddress.go @@ -7,23 +7,56 @@ import ( "go.opentelemetry.io/otel/api/global" "go.opentelemetry.io/otel/api/trace" "go.opentelemetry.io/otel/label" + "golang.org/x/xerrors" "github.com/filecoin-project/sentinel-visor/metrics" "github.com/filecoin-project/sentinel-visor/model" ) type IdAddress struct { + Height int64 `pg:",pk,notnull,use_zero"` ID string `pg:",pk,notnull"` Address string `pg:",pk,notnull"` StateRoot string `pg:",pk,notnull"` } +type IdAddressV0 struct { + tableName struct{} `pg:"id_addresses"` // nolint: structcheck,unused + ID string `pg:",pk,notnull"` + Address string `pg:",pk,notnull"` + StateRoot string `pg:",pk,notnull"` +} + +func (ia *IdAddress) AsVersion(version model.Version) (interface{}, bool) { + switch version.Major { + case 0: + if ia == nil { + return (*IdAddressV0)(nil), true + } + + return &IdAddressV0{ + ID: ia.ID, + Address: ia.Address, + StateRoot: ia.StateRoot, + }, true + case 1: + return ia, true + default: + return nil, false + } +} + func (ia *IdAddress) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { ctx, _ = tag.New(ctx, tag.Upsert(metrics.Table, "id_addresses")) stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - return s.PersistModel(ctx, ia) + m, ok := ia.AsVersion(version) + if !ok { + return xerrors.Errorf("IdAddress not supported for schema version %s", version) + } + + return s.PersistModel(ctx, m) } type IdAddressList []*IdAddress @@ -36,10 +69,15 @@ func (ias IdAddressList) Persist(ctx context.Context, s model.StorageBatch, vers stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - for _, ia := range ias { - if err := s.PersistModel(ctx, ia); err != nil { - return err + if version.Major != 1 { + // Support older versions, but in a non-optimal way + for _, m := range ias { + if err := m.Persist(ctx, s, version); err != nil { + return err + } } + return nil } - return nil + + return s.PersistModel(ctx, ias) } diff --git a/model/actors/miner/feedebt.go b/model/actors/miner/feedebt.go index a63974178..47bd6bfa0 100644 --- a/model/actors/miner/feedebt.go +++ b/model/actors/miner/feedebt.go @@ -5,6 +5,7 @@ import ( "go.opencensus.io/tag" "go.opentelemetry.io/otel/api/global" + "golang.org/x/xerrors" "github.com/filecoin-project/sentinel-visor/metrics" 
"github.com/filecoin-project/sentinel-visor/model" @@ -15,9 +16,38 @@ type MinerFeeDebt struct { MinerID string `pg:",pk,notnull"` StateRoot string `pg:",pk,notnull"` + FeeDebt string `pg:"type:numeric,notnull"` +} + +type MinerFeeDebtV0 struct { + tableName struct{} `pg:"miner_fee_debts"` // nolint: structcheck,unused + Height int64 `pg:",pk,notnull,use_zero"` + MinerID string `pg:",pk,notnull"` + StateRoot string `pg:",pk,notnull"` + FeeDebt string `pg:",notnull"` } +func (m *MinerFeeDebt) AsVersion(version model.Version) (interface{}, bool) { + switch version.Major { + case 0: + if m == nil { + return (*MinerFeeDebtV0)(nil), true + } + + return &MinerFeeDebtV0{ + Height: m.Height, + MinerID: m.MinerID, + StateRoot: m.StateRoot, + FeeDebt: m.FeeDebt, + }, true + case 1: + return m, true + default: + return nil, false + } +} + func (m *MinerFeeDebt) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { ctx, span := global.Tracer("").Start(ctx, "MinerFeeDebt.Persist") defer span.End() @@ -26,7 +56,12 @@ func (m *MinerFeeDebt) Persist(ctx context.Context, s model.StorageBatch, versio stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - return s.PersistModel(ctx, m) + vm, ok := m.AsVersion(version) + if !ok { + return xerrors.Errorf("MinerFeeDebt not supported for schema version %s", version) + } + + return s.PersistModel(ctx, vm) } type MinerFeeDebtList []*MinerFeeDebt @@ -42,5 +77,16 @@ func (ml MinerFeeDebtList) Persist(ctx context.Context, s model.StorageBatch, ve if len(ml) == 0 { return nil } + + if version.Major != 1 { + // Support older versions, but in a non-optimal way + for _, m := range ml { + if err := m.Persist(ctx, s, version); err != nil { + return err + } + } + return nil + } + return s.PersistModel(ctx, ml) } diff --git a/model/actors/miner/lockedfunds.go b/model/actors/miner/lockedfunds.go index 27676e411..20fbf6f36 100644 --- a/model/actors/miner/lockedfunds.go +++ b/model/actors/miner/lockedfunds.go @@ -5,6 +5,7 @@ import ( "go.opencensus.io/tag" "go.opentelemetry.io/otel/api/global" + "golang.org/x/xerrors" "github.com/filecoin-project/sentinel-visor/metrics" "github.com/filecoin-project/sentinel-visor/model" @@ -15,11 +16,44 @@ type MinerLockedFund struct { MinerID string `pg:",pk,notnull"` StateRoot string `pg:",pk,notnull"` + LockedFunds string `pg:"type:numeric,notnull"` + InitialPledge string `pg:"type:numeric,notnull"` + PreCommitDeposits string `pg:"type:numeric,notnull"` +} + +type MinerLockedFundV0 struct { + tableName struct{} `pg:"miner_locked_funds"` // nolint: structcheck,unused + Height int64 `pg:",pk,notnull,use_zero"` + MinerID string `pg:",pk,notnull"` + StateRoot string `pg:",pk,notnull"` + LockedFunds string `pg:",notnull"` InitialPledge string `pg:",notnull"` PreCommitDeposits string `pg:",notnull"` } +func (m *MinerLockedFund) AsVersion(version model.Version) (interface{}, bool) { + switch version.Major { + case 0: + if m == nil { + return (*MinerLockedFundV0)(nil), true + } + + return &MinerLockedFundV0{ + Height: m.Height, + MinerID: m.MinerID, + StateRoot: m.StateRoot, + LockedFunds: m.LockedFunds, + InitialPledge: m.InitialPledge, + PreCommitDeposits: m.PreCommitDeposits, + }, true + case 1: + return m, true + default: + return nil, false + } +} + func (m *MinerLockedFund) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { ctx, span := global.Tracer("").Start(ctx, "MinerLockedFund.Persist") defer span.End() @@ -28,7 +62,12 @@ func (m *MinerLockedFund) Persist(ctx 
context.Context, s model.StorageBatch, ver stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - return s.PersistModel(ctx, m) + vm, ok := m.AsVersion(version) + if !ok { + return xerrors.Errorf("MinerLockedFund not supported for schema version %s", version) + } + + return s.PersistModel(ctx, vm) } type MinerLockedFundsList []*MinerLockedFund @@ -44,5 +83,16 @@ func (ml MinerLockedFundsList) Persist(ctx context.Context, s model.StorageBatch if len(ml) == 0 { return nil } + + if version.Major != 1 { + // Support older versions, but in a non-optimal way + for _, m := range ml { + if err := m.Persist(ctx, s, version); err != nil { + return err + } + } + return nil + } + return s.PersistModel(ctx, ml) } diff --git a/model/actors/miner/precommit.go b/model/actors/miner/precommit.go index b9cef7753..791a196b3 100644 --- a/model/actors/miner/precommit.go +++ b/model/actors/miner/precommit.go @@ -7,6 +7,7 @@ import ( "go.opentelemetry.io/otel/api/global" "go.opentelemetry.io/otel/api/trace" "go.opentelemetry.io/otel/label" + "golang.org/x/xerrors" "github.com/filecoin-project/sentinel-visor/metrics" "github.com/filecoin-project/sentinel-visor/model" @@ -22,6 +23,28 @@ type MinerPreCommitInfo struct { SealRandEpoch int64 `pg:",use_zero"` ExpirationEpoch int64 `pg:",use_zero"` + PreCommitDeposit string `pg:"type:numeric,notnull"` + PreCommitEpoch int64 `pg:",use_zero"` + DealWeight string `pg:"type:numeric,notnull"` + VerifiedDealWeight string `pg:"type:numeric,notnull"` + + IsReplaceCapacity bool + ReplaceSectorDeadline uint64 `pg:",use_zero"` + ReplaceSectorPartition uint64 `pg:",use_zero"` + ReplaceSectorNumber uint64 `pg:",use_zero"` +} + +type MinerPreCommitInfoV0 struct { + tableName struct{} `pg:"miner_pre_commit_infos"` // nolint: structcheck,unused + Height int64 `pg:",pk,notnull,use_zero"` + MinerID string `pg:",pk,notnull"` + SectorID uint64 `pg:",pk,use_zero"` + StateRoot string `pg:",pk,notnull"` + + SealedCID string `pg:",notnull"` + SealRandEpoch int64 `pg:",use_zero"` + ExpirationEpoch int64 `pg:",use_zero"` + PreCommitDeposit string `pg:",notnull"` PreCommitEpoch int64 `pg:",use_zero"` DealWeight string `pg:",notnull"` @@ -33,12 +56,48 @@ type MinerPreCommitInfo struct { ReplaceSectorNumber uint64 `pg:",use_zero"` } +func (mpi *MinerPreCommitInfo) AsVersion(version model.Version) (interface{}, bool) { + switch version.Major { + case 0: + if mpi == nil { + return (*MinerPreCommitInfoV0)(nil), true + } + + return &MinerPreCommitInfoV0{ + Height: mpi.Height, + MinerID: mpi.MinerID, + SectorID: mpi.SectorID, + StateRoot: mpi.StateRoot, + SealedCID: mpi.SealedCID, + SealRandEpoch: mpi.SealRandEpoch, + ExpirationEpoch: mpi.ExpirationEpoch, + PreCommitDeposit: mpi.PreCommitDeposit, + PreCommitEpoch: mpi.PreCommitEpoch, + DealWeight: mpi.DealWeight, + VerifiedDealWeight: mpi.VerifiedDealWeight, + IsReplaceCapacity: mpi.IsReplaceCapacity, + ReplaceSectorDeadline: mpi.ReplaceSectorDeadline, + ReplaceSectorPartition: mpi.ReplaceSectorPartition, + ReplaceSectorNumber: mpi.ReplaceSectorNumber, + }, true + case 1: + return mpi, true + default: + return nil, false + } +} + func (mpi *MinerPreCommitInfo) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { ctx, _ = tag.New(ctx, tag.Upsert(metrics.Table, "miner_pre_commit_infos")) stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - return s.PersistModel(ctx, mpi) + m, ok := mpi.AsVersion(version) + if !ok { + return xerrors.Errorf("MinerPreCommitInfo not supported for schema version %s", 
version) + } + + return s.PersistModel(ctx, m) } type MinerPreCommitInfoList []*MinerPreCommitInfo @@ -54,5 +113,16 @@ func (ml MinerPreCommitInfoList) Persist(ctx context.Context, s model.StorageBat if len(ml) == 0 { return nil } + + if version.Major != 1 { + // Support older versions, but in a non-optimal way + for _, m := range ml { + if err := m.Persist(ctx, s, version); err != nil { + return err + } + } + return nil + } + return s.PersistModel(ctx, ml) } diff --git a/model/actors/miner/sector.go b/model/actors/miner/sector.go index d56326dbd..1c876309a 100644 --- a/model/actors/miner/sector.go +++ b/model/actors/miner/sector.go @@ -7,6 +7,7 @@ import ( "go.opentelemetry.io/otel/api/global" "go.opentelemetry.io/otel/api/trace" "go.opentelemetry.io/otel/label" + "golang.org/x/xerrors" "github.com/filecoin-project/sentinel-visor/metrics" "github.com/filecoin-project/sentinel-visor/model" @@ -23,6 +24,26 @@ type MinerSectorInfo struct { ActivationEpoch int64 `pg:",use_zero"` ExpirationEpoch int64 `pg:",use_zero"` + DealWeight string `pg:"type:numeric,notnull"` + VerifiedDealWeight string `pg:"type:numeric,notnull"` + + InitialPledge string `pg:"type:numeric,notnull"` + ExpectedDayReward string `pg:"type:numeric,notnull"` + ExpectedStoragePledge string `pg:"type:numeric,notnull"` +} + +type MinerSectorInfoV0 struct { + tableName struct{} `pg:"miner_sector_infos"` // nolint: structcheck,unused + Height int64 `pg:",pk,notnull,use_zero"` + MinerID string `pg:",pk,notnull"` + SectorID uint64 `pg:",pk,use_zero"` + StateRoot string `pg:",pk,notnull"` + + SealedCID string `pg:",notnull"` + + ActivationEpoch int64 `pg:",use_zero"` + ExpirationEpoch int64 `pg:",use_zero"` + DealWeight string `pg:",notnull"` VerifiedDealWeight string `pg:",notnull"` @@ -31,15 +52,50 @@ type MinerSectorInfo struct { ExpectedStoragePledge string `pg:",notnull"` } +func (msi *MinerSectorInfo) AsVersion(version model.Version) (interface{}, bool) { + switch version.Major { + case 0: + if msi == nil { + return (*MinerSectorInfoV0)(nil), true + } + + return &MinerSectorInfoV0{ + Height: msi.Height, + MinerID: msi.MinerID, + SectorID: msi.SectorID, + StateRoot: msi.StateRoot, + SealedCID: msi.SealedCID, + ActivationEpoch: msi.ActivationEpoch, + ExpirationEpoch: msi.ExpirationEpoch, + DealWeight: msi.DealWeight, + VerifiedDealWeight: msi.VerifiedDealWeight, + InitialPledge: msi.InitialPledge, + ExpectedDayReward: msi.ExpectedDayReward, + ExpectedStoragePledge: msi.ExpectedStoragePledge, + }, true + case 1: + return msi, true + default: + return nil, false + } +} + func (msi *MinerSectorInfo) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { ctx, _ = tag.New(ctx, tag.Upsert(metrics.Table, "miner_sector_infos")) stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - return s.PersistModel(ctx, msi) + m, ok := msi.AsVersion(version) + if !ok { + return xerrors.Errorf("MinerSectorInfo not supported for schema version %s", version) + } + + return s.PersistModel(ctx, m) } -type MinerSectorInfoList []*MinerSectorInfo +type ( + MinerSectorInfoList []*MinerSectorInfo +) func (ml MinerSectorInfoList) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { ctx, span := global.Tracer("").Start(ctx, "MinerSectorInfoList.Persist", trace.WithAttributes(label.Int("count", len(ml)))) @@ -52,5 +108,16 @@ func (ml MinerSectorInfoList) Persist(ctx context.Context, s model.StorageBatch, if len(ml) == 0 { return nil } + + if version.Major != 1 { + // Support older versions, but 
in a non-optimal way + for _, m := range ml { + if err := m.Persist(ctx, s, version); err != nil { + return err + } + } + return nil + } + return s.PersistModel(ctx, ml) } diff --git a/model/actors/power/chainpower.go b/model/actors/power/chainpower.go index 15455e9a4..b6fefcb08 100644 --- a/model/actors/power/chainpower.go +++ b/model/actors/power/chainpower.go @@ -7,6 +7,7 @@ import ( "go.opentelemetry.io/otel/api/global" "go.opentelemetry.io/otel/api/trace" "go.opentelemetry.io/otel/label" + "golang.org/x/xerrors" "github.com/filecoin-project/sentinel-visor/metrics" "github.com/filecoin-project/sentinel-visor/model" @@ -16,6 +17,26 @@ type ChainPower struct { Height int64 `pg:",pk,notnull,use_zero"` StateRoot string `pg:",pk"` + TotalRawBytesPower string `pg:"type:numeric,notnull"` + TotalQABytesPower string `pg:"type:numeric,notnull"` + + TotalRawBytesCommitted string `pg:"type:numeric,notnull"` + TotalQABytesCommitted string `pg:"type:numeric,notnull"` + + TotalPledgeCollateral string `pg:"type:numeric,notnull"` + + QASmoothedPositionEstimate string `pg:"type:numeric,notnull"` + QASmoothedVelocityEstimate string `pg:"type:numeric,notnull"` + + MinerCount uint64 `pg:",use_zero"` + ParticipatingMinerCount uint64 `pg:",use_zero"` +} + +type ChainPowerV0 struct { + tableName struct{} `pg:"chain_powers"` // nolint: structcheck,unused + Height int64 `pg:",pk,notnull,use_zero"` + StateRoot string `pg:",pk"` + TotalRawBytesPower string `pg:",notnull"` TotalQABytesPower string `pg:",notnull"` @@ -31,6 +52,33 @@ type ChainPower struct { ParticipatingMinerCount uint64 `pg:",use_zero"` } +func (cp *ChainPower) AsVersion(version model.Version) (interface{}, bool) { + switch version.Major { + case 0: + if cp == nil { + return (*ChainPowerV0)(nil), true + } + + return &ChainPowerV0{ + Height: cp.Height, + StateRoot: cp.StateRoot, + TotalRawBytesPower: cp.TotalRawBytesPower, + TotalQABytesPower: cp.TotalQABytesPower, + TotalRawBytesCommitted: cp.TotalRawBytesCommitted, + TotalQABytesCommitted: cp.TotalQABytesCommitted, + TotalPledgeCollateral: cp.TotalPledgeCollateral, + QASmoothedPositionEstimate: cp.QASmoothedPositionEstimate, + QASmoothedVelocityEstimate: cp.QASmoothedVelocityEstimate, + MinerCount: cp.MinerCount, + ParticipatingMinerCount: cp.ParticipatingMinerCount, + }, true + case 1: + return cp, true + default: + return nil, false + } +} + func (cp *ChainPower) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { ctx, span := global.Tracer("").Start(ctx, "ChainPower.PersistWithTx") defer span.End() @@ -39,7 +87,12 @@ func (cp *ChainPower) Persist(ctx context.Context, s model.StorageBatch, version stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - return s.PersistModel(ctx, cp) + vcp, ok := cp.AsVersion(version) + if !ok { + return xerrors.Errorf("ChainPower not supported for schema version %s", version) + } + + return s.PersistModel(ctx, vcp) } // ChainPowerList is a slice of ChainPowers for batch insertion. 
@@ -58,5 +111,16 @@ func (cpl ChainPowerList) Persist(ctx context.Context, s model.StorageBatch, ver if len(cpl) == 0 { return nil } + + if version.Major != 1 { + // Support older versions, but in a non-optimal way + for _, m := range cpl { + if err := m.Persist(ctx, s, version); err != nil { + return err + } + } + return nil + } + return s.PersistModel(ctx, cpl) } diff --git a/model/actors/power/claimedpower.go b/model/actors/power/claimedpower.go index 4bd388f13..97bf4200e 100644 --- a/model/actors/power/claimedpower.go +++ b/model/actors/power/claimedpower.go @@ -5,6 +5,7 @@ import ( "go.opencensus.io/tag" "go.opentelemetry.io/otel/api/global" + "golang.org/x/xerrors" "github.com/filecoin-project/sentinel-visor/metrics" "github.com/filecoin-project/sentinel-visor/model" @@ -14,8 +15,38 @@ type PowerActorClaim struct { Height int64 `pg:",pk,notnull,use_zero"` MinerID string `pg:",pk,notnull"` StateRoot string `pg:",pk,notnull"` - RawBytePower string `pg:",notnull"` - QualityAdjPower string `pg:",notnull"` + RawBytePower string `pg:"type:numeric,notnull"` + QualityAdjPower string `pg:"type:numeric,notnull"` +} + +type PowerActorClaimV0 struct { + tableName struct{} `pg:"power_actor_claims"` // nolint: structcheck,unused + Height int64 `pg:",pk,notnull,use_zero"` + MinerID string `pg:",pk,notnull"` + StateRoot string `pg:",pk,notnull"` + RawBytePower string `pg:",notnull"` + QualityAdjPower string `pg:",notnull"` +} + +func (p *PowerActorClaim) AsVersion(version model.Version) (interface{}, bool) { + switch version.Major { + case 0: + if p == nil { + return (*PowerActorClaimV0)(nil), true + } + + return &PowerActorClaimV0{ + Height: p.Height, + MinerID: p.MinerID, + StateRoot: p.StateRoot, + RawBytePower: p.RawBytePower, + QualityAdjPower: p.QualityAdjPower, + }, true + case 1: + return p, true + default: + return nil, false + } } func (p *PowerActorClaim) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { @@ -26,7 +57,12 @@ func (p *PowerActorClaim) Persist(ctx context.Context, s model.StorageBatch, ver stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - return s.PersistModel(ctx, p) + vp, ok := p.AsVersion(version) + if !ok { + return xerrors.Errorf("PowerActorClaim not supported for schema version %s", version) + } + + return s.PersistModel(ctx, vp) } type PowerActorClaimList []*PowerActorClaim @@ -42,5 +78,16 @@ func (pl PowerActorClaimList) Persist(ctx context.Context, s model.StorageBatch, if len(pl) == 0 { return nil } + + if version.Major != 1 { + // Support older versions, but in a non-optimal way + for _, m := range pl { + if err := m.Persist(ctx, s, version); err != nil { + return err + } + } + return nil + } + return s.PersistModel(ctx, pl) } diff --git a/model/actors/reward/chainreward.go b/model/actors/reward/chainreward.go index cfd0e73ec..3d195ffd8 100644 --- a/model/actors/reward/chainreward.go +++ b/model/actors/reward/chainreward.go @@ -5,6 +5,7 @@ import ( "go.opencensus.io/tag" "go.opentelemetry.io/otel/api/global" + "golang.org/x/xerrors" "github.com/filecoin-project/sentinel-visor/metrics" "github.com/filecoin-project/sentinel-visor/model" @@ -13,18 +14,60 @@ import ( type ChainReward struct { Height int64 `pg:",pk,notnull,use_zero"` StateRoot string `pg:",pk,notnull"` - CumSumBaseline string `pg:",notnull"` - CumSumRealized string `pg:",notnull"` - EffectiveBaselinePower string `pg:",notnull"` - NewBaselinePower string `pg:",notnull"` - NewRewardSmoothedPositionEstimate string `pg:",notnull"` - 
NewRewardSmoothedVelocityEstimate string `pg:",notnull"` - TotalMinedReward string `pg:",notnull"` + CumSumBaseline string `pg:"type:numeric,notnull"` + CumSumRealized string `pg:"type:numeric,notnull"` + EffectiveBaselinePower string `pg:"type:numeric,notnull"` + NewBaselinePower string `pg:"type:numeric,notnull"` + NewRewardSmoothedPositionEstimate string `pg:"type:numeric,notnull"` + NewRewardSmoothedVelocityEstimate string `pg:"type:numeric,notnull"` + TotalMinedReward string `pg:"type:numeric,notnull"` + NewReward string `pg:"type:numeric,notnull"` + EffectiveNetworkTime int64 `pg:",use_zero"` +} + +type ChainRewardV0 struct { + tableName struct{} `pg:"chain_rewards"` // nolint: structcheck,unused + Height int64 `pg:",pk,notnull,use_zero"` + StateRoot string `pg:",pk,notnull"` + CumSumBaseline string `pg:",notnull"` + CumSumRealized string `pg:",notnull"` + EffectiveBaselinePower string `pg:",notnull"` + NewBaselinePower string `pg:",notnull"` + NewRewardSmoothedPositionEstimate string `pg:",notnull"` + NewRewardSmoothedVelocityEstimate string `pg:",notnull"` + TotalMinedReward string `pg:",notnull"` NewReward string `pg:",use_zero"` EffectiveNetworkTime int64 `pg:",use_zero"` } +func (r *ChainReward) AsVersion(version model.Version) (interface{}, bool) { + switch version.Major { + case 0: + if r == nil { + return (*ChainRewardV0)(nil), true + } + + return &ChainRewardV0{ + Height: r.Height, + StateRoot: r.StateRoot, + CumSumBaseline: r.CumSumBaseline, + CumSumRealized: r.CumSumRealized, + EffectiveBaselinePower: r.EffectiveBaselinePower, + NewBaselinePower: r.NewBaselinePower, + NewRewardSmoothedPositionEstimate: r.NewRewardSmoothedPositionEstimate, + NewRewardSmoothedVelocityEstimate: r.NewRewardSmoothedVelocityEstimate, + TotalMinedReward: r.TotalMinedReward, + NewReward: r.NewReward, + EffectiveNetworkTime: r.EffectiveNetworkTime, + }, true + case 1: + return r, true + default: + return nil, false + } +} + func (r *ChainReward) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { ctx, span := global.Tracer("").Start(ctx, "ChainReward.Persist") defer span.End() @@ -33,5 +76,10 @@ func (r *ChainReward) Persist(ctx context.Context, s model.StorageBatch, version stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - return s.PersistModel(ctx, r) + vr, ok := r.AsVersion(version) + if !ok { + return xerrors.Errorf("ChainReward not supported for schema version %s", version) + } + + return s.PersistModel(ctx, vr) } diff --git a/model/chain/economics.go b/model/chain/economics.go index 5b0ea41e2..01a798cc8 100644 --- a/model/chain/economics.go +++ b/model/chain/economics.go @@ -7,12 +7,25 @@ import ( "go.opentelemetry.io/otel/api/global" "go.opentelemetry.io/otel/api/trace" "go.opentelemetry.io/otel/label" + "golang.org/x/xerrors" "github.com/filecoin-project/sentinel-visor/metrics" "github.com/filecoin-project/sentinel-visor/model" ) type ChainEconomics struct { + tableName struct{} `pg:"chain_economics"` //nolint: structcheck,unused + Height int64 `pg:",pk,notnull,use_zero"` + ParentStateRoot string `pg:",notnull"` + CirculatingFil string `pg:"type:numeric,notnull"` + VestedFil string `pg:"type:numeric,notnull"` + MinedFil string `pg:"type:numeric,notnull"` + BurntFil string `pg:"type:numeric,notnull"` + LockedFil string `pg:"type:numeric,notnull"` + FilReserveDisbursed string `pg:"type:numeric,notnull"` +} + +type ChainEconomicsV0 struct { tableName struct{} `pg:"chain_economics"` // nolint: structcheck,unused ParentStateRoot string 
`pg:",notnull"` CirculatingFil string `pg:",notnull"` @@ -22,12 +35,39 @@ type ChainEconomics struct { LockedFil string `pg:",notnull"` } +func (c *ChainEconomics) AsVersion(version model.Version) (interface{}, bool) { + switch version.Major { + case 0: + if c == nil { + return (*ChainEconomicsV0)(nil), true + } + + return &ChainEconomicsV0{ + ParentStateRoot: c.ParentStateRoot, + CirculatingFil: c.CirculatingFil, + VestedFil: c.VestedFil, + MinedFil: c.MinedFil, + BurntFil: c.BurntFil, + LockedFil: c.LockedFil, + }, true + case 1: + return c, true + default: + return nil, false + } +} + func (c *ChainEconomics) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { ctx, _ = tag.New(ctx, tag.Upsert(metrics.Table, "chain_economics")) stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - return s.PersistModel(ctx, c) + m, ok := c.AsVersion(version) + if !ok { + return xerrors.Errorf("ChainEconomics not supported for schema version %s", version) + } + + return s.PersistModel(ctx, m) } type ChainEconomicsList []*ChainEconomics @@ -43,5 +83,15 @@ func (l ChainEconomicsList) Persist(ctx context.Context, s model.StorageBatch, v stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() + if version.Major != 1 { + // Support older versions, but in a non-optimal way + for _, m := range l { + if err := m.Persist(ctx, s, version); err != nil { + return err + } + } + return nil + } + return s.PersistModel(ctx, l) } diff --git a/model/derived/gasoutputs.go b/model/derived/gasoutputs.go index b0daec0dd..7f35619d2 100644 --- a/model/derived/gasoutputs.go +++ b/model/derived/gasoutputs.go @@ -7,12 +7,41 @@ import ( "go.opentelemetry.io/otel/api/global" "go.opentelemetry.io/otel/api/trace" "go.opentelemetry.io/otel/label" + "golang.org/x/xerrors" "github.com/filecoin-project/sentinel-visor/metrics" "github.com/filecoin-project/sentinel-visor/model" ) type GasOutputs struct { + tableName struct{} `pg:"derived_gas_outputs"` //nolint: structcheck,unused + Height int64 `pg:",pk,use_zero,notnull"` + Cid string `pg:",pk,notnull"` + StateRoot string `pg:",pk,notnull"` + From string `pg:",notnull"` + To string `pg:",notnull"` + Value string `pg:"type:numeric,notnull"` + GasFeeCap string `pg:"type:numeric,notnull"` + GasPremium string `pg:"type:numeric,notnull"` + GasLimit int64 `pg:",use_zero,notnull"` + SizeBytes int `pg:",use_zero,notnull"` + Nonce uint64 `pg:",use_zero,notnull"` + Method uint64 `pg:",use_zero,notnull"` + ActorName string `pg:",notnull"` + ActorFamily string `pg:",notnull"` + ExitCode int64 `pg:",use_zero,notnull"` + GasUsed int64 `pg:",use_zero,notnull"` + ParentBaseFee string `pg:"type:numeric,notnull"` + BaseFeeBurn string `pg:"type:numeric,notnull"` + OverEstimationBurn string `pg:"type:numeric,notnull"` + MinerPenalty string `pg:"type:numeric,notnull"` + MinerTip string `pg:"type:numeric,notnull"` + Refund string `pg:"type:numeric,notnull"` + GasRefund int64 `pg:",use_zero,notnull"` + GasBurned int64 `pg:",use_zero,notnull"` +} + +type GasOutputsV0 struct { tableName struct{} `pg:"derived_gas_outputs"` //nolint: structcheck,unused Height int64 `pg:",pk,use_zero,notnull"` Cid string `pg:",pk,notnull"` @@ -39,12 +68,56 @@ type GasOutputs struct { GasBurned int64 `pg:",use_zero,notnull"` } +func (g *GasOutputs) AsVersion(version model.Version) (interface{}, bool) { + switch version.Major { + case 0: + if g == nil { + return (*GasOutputsV0)(nil), true + } + + return &GasOutputsV0{ + Height: g.Height, + Cid: g.Cid, + StateRoot: g.StateRoot, + From: 
g.From, + To: g.To, + Value: g.Value, + GasFeeCap: g.GasFeeCap, + GasPremium: g.GasPremium, + GasLimit: g.GasLimit, + SizeBytes: g.SizeBytes, + Nonce: g.Nonce, + Method: g.Method, + ActorName: g.ActorName, + ExitCode: g.ExitCode, + GasUsed: g.GasUsed, + ParentBaseFee: g.ParentBaseFee, + BaseFeeBurn: g.BaseFeeBurn, + OverEstimationBurn: g.OverEstimationBurn, + MinerPenalty: g.MinerPenalty, + MinerTip: g.MinerTip, + Refund: g.Refund, + GasRefund: g.GasRefund, + GasBurned: g.GasBurned, + }, true + case 1: + return g, true + default: + return nil, false + } +} + func (g *GasOutputs) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { ctx, _ = tag.New(ctx, tag.Upsert(metrics.Table, "derived_gas_outputs")) stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - return s.PersistModel(ctx, g) + vg, ok := g.AsVersion(version) + if !ok { + return xerrors.Errorf("GasOutputs not supported for schema version %s", version) + } + + return s.PersistModel(ctx, vg) } type GasOutputsList []*GasOutputs @@ -60,5 +133,15 @@ func (l GasOutputsList) Persist(ctx context.Context, s model.StorageBatch, versi stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() + if version.Major != 1 { + // Support older versions, but in a non-optimal way + for _, m := range l { + if err := m.Persist(ctx, s, version); err != nil { + return err + } + } + return nil + } + return s.PersistModel(ctx, l) } diff --git a/model/messages/gaseconomy.go b/model/messages/gaseconomy.go index c1dc51cd8..cd2a4096b 100644 --- a/model/messages/gaseconomy.go +++ b/model/messages/gaseconomy.go @@ -4,6 +4,7 @@ import ( "context" "go.opencensus.io/tag" + "golang.org/x/xerrors" "github.com/filecoin-project/sentinel-visor/metrics" "github.com/filecoin-project/sentinel-visor/model" @@ -14,6 +15,22 @@ type MessageGasEconomy struct { Height int64 `pg:",pk,notnull,use_zero"` StateRoot string `pg:",pk,notnull"` + BaseFee float64 `pg:"type:numeric,use_zero"` + BaseFeeChangeLog float64 `pg:",use_zero"` + + GasLimitTotal int64 `pg:"type:numeric,use_zero"` + GasLimitUniqueTotal int64 `pg:"type:numeric,use_zero"` + + GasFillRatio float64 `pg:",use_zero"` + GasCapacityRatio float64 `pg:",use_zero"` + GasWasteRatio float64 `pg:",use_zero"` +} + +type MessageGasEconomyV0 struct { + tableName struct{} `pg:"message_gas_economy"` // nolint: structcheck,unused + Height int64 `pg:",pk,notnull,use_zero"` + StateRoot string `pg:",pk,notnull"` + BaseFee float64 `pg:",use_zero"` BaseFeeChangeLog float64 `pg:",use_zero"` @@ -25,10 +42,40 @@ type MessageGasEconomy struct { GasWasteRatio float64 `pg:",use_zero"` } +func (g *MessageGasEconomy) AsVersion(version model.Version) (interface{}, bool) { + switch version.Major { + case 0: + if g == nil { + return (*MessageGasEconomyV0)(nil), true + } + + return &MessageGasEconomyV0{ + Height: g.Height, + StateRoot: g.StateRoot, + BaseFee: g.BaseFee, + BaseFeeChangeLog: g.BaseFeeChangeLog, + GasLimitTotal: g.GasLimitTotal, + GasLimitUniqueTotal: g.GasLimitUniqueTotal, + GasFillRatio: g.GasFillRatio, + GasCapacityRatio: g.GasCapacityRatio, + GasWasteRatio: g.GasWasteRatio, + }, true + case 1: + return g, true + default: + return nil, false + } +} + func (g *MessageGasEconomy) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { ctx, _ = tag.New(ctx, tag.Upsert(metrics.Table, "message_gas_economy")) stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - return s.PersistModel(ctx, g) + vm, ok := g.AsVersion(version) + if !ok { + return 
xerrors.Errorf("MessageGasEconomy not supported for schema version %s", version) + } + + return s.PersistModel(ctx, vm) } diff --git a/model/messages/internal.go b/model/messages/internal.go new file mode 100644 index 000000000..bb2338ac0 --- /dev/null +++ b/model/messages/internal.go @@ -0,0 +1,88 @@ +package messages + +import ( + "context" + + "go.opencensus.io/tag" + "go.opentelemetry.io/otel/api/global" + "go.opentelemetry.io/otel/api/trace" + "go.opentelemetry.io/otel/label" + + "github.com/filecoin-project/sentinel-visor/metrics" + "github.com/filecoin-project/sentinel-visor/model" +) + +type InternalMessage struct { + tableName struct{} `pg:"internal_messages"` // nolint: structcheck,unused + Height int64 `pg:",pk,notnull,use_zero"` + Cid string `pg:",pk,notnull"` + StateRoot string `pg:",notnull"` + SourceMessage string + From string `pg:",notnull"` + To string `pg:",notnull"` + Value string `pg:"type:numeric,notnull"` + Method uint64 `pg:",use_zero"` + ActorName string `pg:",notnull"` + ActorFamily string `pg:",notnull"` + ExitCode int64 `pg:",use_zero"` + GasUsed int64 `pg:",use_zero"` +} + +func (im *InternalMessage) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { + ctx, _ = tag.New(ctx, tag.Upsert(metrics.Table, "internal_messages")) + stop := metrics.Timer(ctx, metrics.PersistDuration) + defer stop() + + return s.PersistModel(ctx, im) +} + +type InternalMessageList []*InternalMessage + +func (l InternalMessageList) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { + if len(l) == 0 { + return nil + } + ctx, span := global.Tracer("").Start(ctx, "InternalMessageList.Persist", trace.WithAttributes(label.Int("count", len(l)))) + defer span.End() + + ctx, _ = tag.New(ctx, tag.Upsert(metrics.Table, "internal_messages")) + stop := metrics.Timer(ctx, metrics.PersistDuration) + defer stop() + + return s.PersistModel(ctx, l) +} + +type InternalParsedMessage struct { + tableName struct{} `pg:"internal_parsed_messages"` // nolint: structcheck,unused + Height int64 `pg:",pk,notnull,use_zero"` + Cid string `pg:",pk,notnull"` + From string `pg:",notnull"` + To string `pg:",notnull"` + Value string `pg:"type:numeric,notnull"` + Method string `pg:",use_zero"` + Params string `pg:",type:jsonb"` +} + +func (ipm *InternalParsedMessage) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { + ctx, _ = tag.New(ctx, tag.Upsert(metrics.Table, "internal_parsed_messages")) + stop := metrics.Timer(ctx, metrics.PersistDuration) + defer stop() + + return s.PersistModel(ctx, ipm) +} + +type InternalParsedMessageList []*InternalParsedMessage + +func (l InternalParsedMessageList) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { + if len(l) == 0 { + return nil + } + ctx, span := global.Tracer("").Start(ctx, "InternalParsedMessageList.Persist", trace.WithAttributes(label.Int("count", len(l)))) + defer span.End() + + ctx, _ = tag.New(ctx, tag.Upsert(metrics.Table, "internal_parsed_messages")) + stop := metrics.Timer(ctx, metrics.PersistDuration) + defer stop() + + return s.PersistModel(ctx, l) +} diff --git a/model/messages/message.go b/model/messages/message.go index 56e4ae67c..2d7d525c3 100644 --- a/model/messages/message.go +++ b/model/messages/message.go @@ -7,6 +7,7 @@ import ( "go.opentelemetry.io/otel/api/global" "go.opentelemetry.io/otel/api/trace" "go.opentelemetry.io/otel/label" + "golang.org/x/xerrors" "github.com/filecoin-project/sentinel-visor/metrics" 
"github.com/filecoin-project/sentinel-visor/model" @@ -16,6 +17,23 @@ type Message struct { Height int64 `pg:",pk,notnull,use_zero"` Cid string `pg:",pk,notnull"` + From string `pg:",notnull"` + To string `pg:",notnull"` + Value string `pg:"type:numeric,notnull"` + GasFeeCap string `pg:"type:numeric,notnull"` + GasPremium string `pg:"type:numeric,notnull"` + + GasLimit int64 `pg:",use_zero"` + SizeBytes int `pg:",use_zero"` + Nonce uint64 `pg:",use_zero"` + Method uint64 `pg:",use_zero"` +} + +type MessageV0 struct { + tableName struct{} `pg:"messages"` // nolint: structcheck,unused + Height int64 `pg:",pk,notnull,use_zero"` + Cid string `pg:",pk,notnull"` + From string `pg:",notnull"` To string `pg:",notnull"` Value string `pg:",notnull"` @@ -28,12 +46,44 @@ type Message struct { Method uint64 `pg:",use_zero"` } +func (m *Message) AsVersion(version model.Version) (interface{}, bool) { + switch version.Major { + case 0: + if m == nil { + return (*MessageV0)(nil), true + } + + return &MessageV0{ + Height: m.Height, + Cid: m.Cid, + From: m.From, + To: m.To, + Value: m.Value, + GasFeeCap: m.GasFeeCap, + GasPremium: m.GasPremium, + GasLimit: m.GasLimit, + SizeBytes: m.SizeBytes, + Nonce: m.Nonce, + Method: m.Method, + }, true + case 1: + return m, true + default: + return nil, false + } +} + func (m *Message) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { ctx, _ = tag.New(ctx, tag.Upsert(metrics.Table, "messages")) stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - return s.PersistModel(ctx, m) + vm, ok := m.AsVersion(version) + if !ok { + return xerrors.Errorf("Message not supported for schema version %s", version) + } + + return s.PersistModel(ctx, vm) } type Messages []*Message @@ -49,5 +99,15 @@ func (ms Messages) Persist(ctx context.Context, s model.StorageBatch, version mo stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() + if version.Major != 1 { + // Support older versions, but in a non-optimal way + for _, m := range ms { + if err := m.Persist(ctx, s, version); err != nil { + return err + } + } + return nil + } + return s.PersistModel(ctx, ms) } diff --git a/model/messages/parsedmessage.go b/model/messages/parsedmessage.go index b36cda368..ed7bae5b6 100644 --- a/model/messages/parsedmessage.go +++ b/model/messages/parsedmessage.go @@ -7,6 +7,7 @@ import ( "go.opentelemetry.io/otel/api/global" "go.opentelemetry.io/otel/api/trace" "go.opentelemetry.io/otel/label" + "golang.org/x/xerrors" "github.com/filecoin-project/sentinel-visor/metrics" "github.com/filecoin-project/sentinel-visor/model" @@ -17,10 +18,43 @@ type ParsedMessage struct { Cid string `pg:",pk,notnull"` From string `pg:",notnull"` To string `pg:",notnull"` - Value string `pg:",notnull"` + Value string `pg:"type:numeric,notnull"` Method string `pg:",notnull"` + Params string `pg:",type:jsonb"` +} + +type ParsedMessageV0 struct { + tableName struct{} `pg:"parsed_messages"` // nolint: structcheck,unused + Height int64 `pg:",pk,notnull,use_zero"` + Cid string `pg:",pk,notnull"` + From string `pg:",notnull"` + To string `pg:",notnull"` + Value string `pg:",notnull"` + Method string `pg:",notnull"` + Params string `pg:",type:jsonb,notnull"` +} - Params string `pg:",type:jsonb,notnull"` +func (pm *ParsedMessage) AsVersion(version model.Version) (interface{}, bool) { + switch version.Major { + case 0: + if pm == nil { + return (*ParsedMessageV0)(nil), true + } + + return &ParsedMessageV0{ + Height: pm.Height, + Cid: pm.Cid, + From: pm.From, + To: pm.To, + Value: 
pm.Value, + Method: pm.Method, + Params: pm.Params, + }, true + case 1: + return pm, true + default: + return nil, false + } } func (pm *ParsedMessage) Persist(ctx context.Context, s model.StorageBatch, version model.Version) error { @@ -28,7 +62,12 @@ func (pm *ParsedMessage) Persist(ctx context.Context, s model.StorageBatch, vers stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() - return s.PersistModel(ctx, pm) + vpm, ok := pm.AsVersion(version) + if !ok { + return xerrors.Errorf("ParsedMessage not supported for schema version %s", version) + } + + return s.PersistModel(ctx, vpm) } type ParsedMessages []*ParsedMessage @@ -44,5 +83,15 @@ func (pms ParsedMessages) Persist(ctx context.Context, s model.StorageBatch, ver stop := metrics.Timer(ctx, metrics.PersistDuration) defer stop() + if version.Major != 1 { + // Support older versions, but in a non-optimal way + for _, m := range pms { + if err := m.Persist(ctx, s, version); err != nil { + return err + } + } + return nil + } + return s.PersistModel(ctx, pms) } diff --git a/schemas/v1/schema.go b/schemas/v1/schema.go new file mode 100644 index 000000000..f1cfb3cd4 --- /dev/null +++ b/schemas/v1/schema.go @@ -0,0 +1,1368 @@ +package v1 + +import ( + "github.com/filecoin-project/sentinel-visor/schemas" + "github.com/go-pg/migrations/v8" +) + +// Patches is the collection of patches made to the base schema +var Patches = migrations.NewCollection() + +func init() { + schemas.RegisterSchema(1) +} + +// BaseTemplate is the template for the initial schema of this major version. The template expects variables to be +passed using the schema.Config struct. Patches are applied on top of this base. +var BaseTemplate = ` + +{{- if and .SchemaName (ne .SchemaName "public") }} +SET search_path TO {{ .SchemaName }},public; +{{- end }} + +-- ===================================================================================================================== +-- TYPES +-- ===================================================================================================================== + +CREATE TYPE {{ .SchemaName | default "public"}}.miner_sector_event_type AS ENUM ( + 'PRECOMMIT_ADDED', + 'PRECOMMIT_EXPIRED', + 'COMMIT_CAPACITY_ADDED', + 'SECTOR_ADDED', + 'SECTOR_EXTENDED', + 'SECTOR_EXPIRED', + 'SECTOR_FAULTED', + 'SECTOR_RECOVERING', + 'SECTOR_RECOVERED', + 'SECTOR_TERMINATED' +); + +-- ===================================================================================================================== +-- INDEPENDENT FUNCTIONS +-- ===================================================================================================================== + +CREATE FUNCTION {{ .SchemaName | default "public"}}.height_to_unix(fil_epoch bigint) RETURNS bigint + LANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE + AS $$ + SELECT ((fil_epoch * 30) + 1598306400)::bigint; + $$; + +CREATE FUNCTION {{ .SchemaName | default "public"}}.unix_to_height(unix_epoch bigint) RETURNS bigint + LANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE + AS $$ + SELECT ((unix_epoch - 1598306400) / 30)::bigint; + $$; + +-- Note: system function 'now' is STABLE PARALLEL SAFE STRICT
CREATE FUNCTION {{ .SchemaName | default "public"}}.current_height() RETURNS bigint + LANGUAGE sql STABLE PARALLEL SAFE STRICT + AS $$ + SELECT unix_to_height(extract(epoch from now() AT TIME ZONE 'UTC')::bigint); + $$; + + +-- ===================================================================================================================== +-- TABLES +-- 
===================================================================================================================== + +-- ---------------------------------------------------------------- +-- Name: actor_states +-- Model: common.ActorState +-- Growth: About 650 rows per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.actor_states ( + head text NOT NULL, + code text NOT NULL, + state jsonb NOT NULL, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.actor_states ADD CONSTRAINT actor_states_pkey PRIMARY KEY (height, head, code); +CREATE INDEX actor_states_height_idx ON {{ .SchemaName | default "public"}}.actor_states USING btree (height DESC); + +-- Convert actor_states to a hypertable partitioned on height (time) +-- Assume ~650 state changes per epoch, ~850 bytes per table row +-- Height chunked per 4 days so we expect 11520*650 = ~7488000 rows per chunk, ~4.6GiB per chunk +SELECT create_hypertable( + 'actor_states', + 'height', + chunk_time_interval => 11520, + if_not_exists => TRUE +); +SELECT set_integer_now_func('actor_states', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.actor_states IS 'Actor states that were changed at an epoch. Associates actors states as single-level trees with CIDs pointing to complete state tree with the root CID (head) for that actor''s state.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.actor_states.head IS 'CID of the root of the state tree for the actor.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.actor_states.code IS 'CID identifier for the type of the actor.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.actor_states.state IS 'Top level of state data.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.actor_states.height IS 'Epoch when this state change happened.'; + + +-- ---------------------------------------------------------------- +-- Name: actors +-- Model: common.Actor +-- Growth: About 1300 rows per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.actors ( + id text NOT NULL, + code text NOT NULL, + head text NOT NULL, + nonce bigint NOT NULL, + balance text NOT NULL, + state_root text NOT NULL, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.actors ADD CONSTRAINT actors_pkey PRIMARY KEY (height, id, state_root); +CREATE INDEX actors_height_idx ON {{ .SchemaName | default "public"}}.actors USING btree (height DESC); + +-- Convert actors to a hypertable partitioned on height (time) +-- Assume ~1300 state changes per epoch, ~250 bytes per table row +-- Height chunked per 7 days so we expect 20160*1300 = ~26208000 rows per chunk, ~6.2GiB per chunk +SELECT create_hypertable( + 'actors', + 'height', + chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('actors', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.actors IS 'Actors on chain that were added or updated at an epoch. Associates the actor''s state root CID (head) with the chain state root CID from which it descends. 
Includes account ID nonce and balance at each state.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.actors.id IS 'Actor address.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.actors.code IS 'Human readable identifier for the type of the actor.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.actors.head IS 'CID of the root of the state tree for the actor.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.actors.nonce IS 'The next actor nonce that is expected to appear on chain.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.actors.balance IS 'Actor balance in attoFIL.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.actors.state_root IS 'CID of the state root.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.actors.height IS 'Epoch when this actor was created or updated.'; + + +-- ---------------------------------------------------------------- +-- Name: blocks.block_headers +-- Model: blocks.BlockHeader +-- Growth: About 4-5 rows per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.block_headers ( + cid text NOT NULL, + parent_weight text NOT NULL, + parent_state_root text NOT NULL, + height bigint NOT NULL, + miner text NOT NULL, + "timestamp" bigint NOT NULL, + win_count bigint, + parent_base_fee text NOT NULL, + fork_signaling bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.block_headers ADD CONSTRAINT block_headers_pkey PRIMARY KEY (height, cid); +CREATE INDEX block_headers_height_idx ON {{ .SchemaName | default "public"}}.block_headers USING btree (height DESC); +CREATE INDEX block_headers_timestamp_idx ON {{ .SchemaName | default "public"}}.block_headers USING btree ("timestamp"); + +-- Convert block_headers to a hypertable partitioned on height (time) +-- Assume ~5 blocks per epoch, ~432 bytes per table row +-- Height chunked per week so we expect 20160*5 = ~100800 rows per chunk, ~42MiB per chunk +SELECT create_hypertable( + 'block_headers', + 'height', + chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('block_headers', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.block_headers IS 'Blocks included in tipsets at an epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_headers.cid IS 'CID of the block.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_headers.parent_weight IS 'Aggregate chain weight of the block''s parent set.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_headers.parent_state_root IS 'CID of the block''s parent state root.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_headers.height IS 'Epoch when this block was mined.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_headers.miner IS 'Address of the miner who mined this block.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_headers."timestamp" IS 'Time the block was mined in Unix time, the number of seconds elapsed since January 1, 1970 UTC.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_headers.win_count IS 'Number of reward units won in this block.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_headers.parent_base_fee IS 'The base fee after executing the parent tipset.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_headers.fork_signaling IS 'Flag used as part of signaling forks.'; + + +-- 
---------------------------------------------------------------- +-- Name: block_messages +-- Model: messages.BlockMessage +-- Growth: About 900 rows per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.block_messages ( + block text NOT NULL, + message text NOT NULL, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.block_messages ADD CONSTRAINT block_messages_pkey PRIMARY KEY (height, block, message); +CREATE INDEX block_messages_height_idx ON {{ .SchemaName | default "public"}}.block_messages USING btree (height DESC); + +-- Convert block_messages to a hypertable partitioned on height (time) +-- Assume ~250 messages per epoch, ~200 bytes per table row +-- Height chunked per day so we expect 2880*900 = ~2592000 rows per chunk, ~500MiB per chunk +SELECT create_hypertable( + 'block_messages', + 'height', + chunk_time_interval => 2880, + if_not_exists => TRUE +); +SELECT set_integer_now_func('block_messages', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.block_messages IS 'Message CIDs and the Blocks CID which contain them.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_messages.block IS 'CID of the block that contains the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_messages.message IS 'CID of a message in the block.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_messages.height IS 'Epoch when the block was mined.'; + + +-- ---------------------------------------------------------------- +-- Name: block_parents +-- Model: blocks.BlockParent +-- Growth: About 20 rows per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.block_parents ( + block text NOT NULL, + parent text NOT NULL, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.block_parents ADD CONSTRAINT block_parents_pkey PRIMARY KEY (height, block, parent); +CREATE INDEX block_parents_height_idx ON {{ .SchemaName | default "public"}}.block_parents USING btree (height DESC); + +-- Convert block_parents to a hypertable partitioned on height (time) +-- Assume ~5 blocks per epoch with ~4 parents, ~150 bytes per table row +-- Height chunked per week so we expect 20160*5*4 = ~403200 rows per chunk, ~58MiB per chunk +SELECT create_hypertable( + 'block_parents', + 'height', + chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('block_parents', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.block_parents IS 'Block CIDs to many parent Block CIDs.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_parents.block IS 'CID of the block.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_parents.parent IS 'CID of the parent block.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.block_parents.height IS 'Epoch when the block was mined.'; + +-- ---------------------------------------------------------------- +-- Name: chain_economics +-- Model: chain.ChainEconomics +-- Growth: One row per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.chain_economics ( + height bigint NOT NULL, + parent_state_root text NOT NULL, + circulating_fil numeric NOT NULL, + vested_fil numeric NOT NULL, + mined_fil numeric NOT NULL, + burnt_fil numeric NOT 
NULL, + locked_fil numeric NOT NULL, + fil_reserve_disbursed numeric NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.chain_economics ADD CONSTRAINT chain_economics_pk PRIMARY KEY (height, parent_state_root); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.chain_economics IS 'Economic summaries per state root CID.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_economics.height IS 'Epoch of the economic summary.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_economics.parent_state_root IS 'CID of the parent state root.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_economics.circulating_fil IS 'The amount of FIL (attoFIL) circulating and tradeable in the economy. The basis for Market Cap calculations.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_economics.vested_fil IS 'Total amount of FIL (attoFIL) that is vested from genesis allocation.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_economics.mined_fil IS 'The amount of FIL (attoFIL) that has been mined by storage miners.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_economics.burnt_fil IS 'Total FIL (attoFIL) burned as part of penalties and on-chain computations.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_economics.locked_fil IS 'The amount of FIL (attoFIL) locked as part of mining, deals, and other mechanisms.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_economics.fil_reserve_disbursed IS 'The amount of FIL (attoFIL) that has been disbursed from the mining reserve.'; + + +-- ---------------------------------------------------------------- +-- Name: chain_powers +-- Model: chain.ChainPower +-- Growth: One row per epoch +-- Notes: This was a hypertable in v0, removed since it only grows 1 row per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.chain_powers ( + state_root text NOT NULL, + total_raw_bytes_power numeric NOT NULL, + total_raw_bytes_committed numeric NOT NULL, + total_qa_bytes_power numeric NOT NULL, + total_qa_bytes_committed numeric NOT NULL, + total_pledge_collateral numeric NOT NULL, + qa_smoothed_position_estimate numeric NOT NULL, + qa_smoothed_velocity_estimate numeric NOT NULL, + miner_count bigint, + participating_miner_count bigint, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.chain_powers ADD CONSTRAINT chain_powers_pkey PRIMARY KEY (height, state_root); +CREATE INDEX chain_powers_height_idx ON {{ .SchemaName | default "public"}}.chain_powers USING btree (height DESC); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.chain_powers IS 'Power summaries from the Power actor.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_powers.state_root IS 'CID of the parent state root.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_powers.total_raw_bytes_power IS 'Total storage power in bytes in the network. Raw byte power is the size of a sector in bytes.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_powers.total_raw_bytes_committed IS 'Total provably committed storage power in bytes. Raw byte power is the size of a sector in bytes.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_powers.total_qa_bytes_power IS 'Total quality adjusted storage power in bytes in the network. 
Quality adjusted power is a weighted average of the quality of its space and it is based on the size, duration and quality of its deals.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_powers.total_qa_bytes_committed IS 'Total provably committed, quality adjusted storage power in bytes. Quality adjusted power is a weighted average of the quality of its space and it is based on the size, duration and quality of its deals.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_powers.total_pledge_collateral IS 'Total locked FIL (attoFIL) miners have pledged as collateral in order to participate in the economy.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_powers.qa_smoothed_position_estimate IS 'Total power smoothed position estimate - Alpha Beta Filter "position" (value) estimate in Q.128 format.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_powers.qa_smoothed_velocity_estimate IS 'Total power smoothed velocity estimate - Alpha Beta Filter "velocity" (rate of change of value) estimate in Q.128 format.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_powers.miner_count IS 'Total number of miners.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_powers.participating_miner_count IS 'Total number of miners with power above the minimum miner threshold.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_powers.height IS 'Epoch this power summary applies to.'; + + +-- ---------------------------------------------------------------- +-- Name: chain_rewards +-- Model: reward.ChainReward +-- Growth: One row per epoch +-- Notes: This was a hypertable in v0, removed since it only grows 1 row per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.chain_rewards ( + state_root text NOT NULL, + cum_sum_baseline numeric NOT NULL, + cum_sum_realized numeric NOT NULL, + effective_baseline_power numeric NOT NULL, + new_baseline_power numeric NOT NULL, + new_reward_smoothed_position_estimate numeric NOT NULL, + new_reward_smoothed_velocity_estimate numeric NOT NULL, + total_mined_reward numeric NOT NULL, + new_reward numeric, + effective_network_time bigint, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.chain_rewards ADD CONSTRAINT chain_rewards_pkey PRIMARY KEY (height, state_root); +CREATE INDEX chain_rewards_height_idx ON {{ .SchemaName | default "public"}}.chain_rewards USING btree (height DESC); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.chain_rewards IS 'Reward summaries from the Reward actor.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_rewards.state_root IS 'CID of the parent state root.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_rewards.cum_sum_baseline IS 'Target that CumsumRealized needs to reach for EffectiveNetworkTime to increase. It is measured in byte-epochs (space * time) representing power committed to the network for some duration.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_rewards.cum_sum_realized IS 'Cumulative sum of network power capped by BaselinePower(epoch). 
It is measured in byte-epochs (space * time) representing power committed to the network for some duration.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_rewards.effective_baseline_power IS 'The baseline power (in bytes) at the EffectiveNetworkTime epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_rewards.new_baseline_power IS 'The baseline power (in bytes) the network is targeting.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_rewards.new_reward_smoothed_position_estimate IS 'Smoothed reward position estimate - Alpha Beta Filter "position" (value) estimate in Q.128 format.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_rewards.new_reward_smoothed_velocity_estimate IS 'Smoothed reward velocity estimate - Alpha Beta Filter "velocity" (rate of change of value) estimate in Q.128 format.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_rewards.total_mined_reward IS 'The total FIL (attoFIL) awarded to block miners.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_rewards.new_reward IS 'The reward to be paid in per WinCount to block producers. The actual reward total paid out depends on the number of winners in any round. This value is recomputed every non-null epoch and used in the next non-null epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_rewards.effective_network_time IS 'Ceiling of real effective network time "theta" based on CumsumBaselinePower(theta) == CumsumRealizedPower. Theta captures the notion of how much the network has progressed in its baseline and in advancing network time.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.chain_rewards.height IS 'Epoch this rewards summary applies to.'; + +-- ---------------------------------------------------------------- +-- Name: derived_gas_outputs +-- Model: derived.GasOutputs +-- Growth: About 340 rows per epoch +-- Notes: Converted to hypertable +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.derived_gas_outputs ( + cid text NOT NULL, + "from" text NOT NULL, + "to" text NOT NULL, + value numeric NOT NULL, + gas_fee_cap numeric NOT NULL, + gas_premium numeric NOT NULL, + gas_limit bigint, + size_bytes bigint, + nonce bigint, + method bigint, + state_root text NOT NULL, + exit_code bigint NOT NULL, + gas_used bigint NOT NULL, + parent_base_fee numeric NOT NULL, + base_fee_burn numeric NOT NULL, + over_estimation_burn numeric NOT NULL, + miner_penalty numeric NOT NULL, + miner_tip numeric NOT NULL, + refund numeric NOT NULL, + gas_refund bigint NOT NULL, + gas_burned bigint NOT NULL, + height bigint NOT NULL, + actor_name text NOT NULL, + actor_family text NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.derived_gas_outputs ADD CONSTRAINT derived_gas_outputs_pkey PRIMARY KEY (height, cid, state_root); +CREATE INDEX derived_gas_outputs_exit_code_index ON {{ .SchemaName | default "public"}}.derived_gas_outputs USING btree (exit_code); +CREATE INDEX derived_gas_outputs_from_index ON {{ .SchemaName | default "public"}}.derived_gas_outputs USING hash ("from"); +CREATE INDEX derived_gas_outputs_method_index ON {{ .SchemaName | default "public"}}.derived_gas_outputs USING btree (method); +CREATE INDEX derived_gas_outputs_to_index ON {{ .SchemaName | default "public"}}.derived_gas_outputs USING hash ("to"); +CREATE INDEX derived_gas_outputs_actor_family_index ON {{ .SchemaName | default "public"}}.derived_gas_outputs USING btree 
("actor_family"); + +-- Convert block_headers to a hypertable partitioned on height (time) +-- Assume ~340 rows per epoch, ~491 bytes per table row +-- Height chunked per week so we expect 20160*340 = ~6854400 rows per chunk, ~3.2GiB per chunk +SELECT create_hypertable( + 'derived_gas_outputs', + 'height', + chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('derived_gas_outputs', 'current_height', replace_if_exists => true); + + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.derived_gas_outputs IS 'Derived gas costs resulting from execution of a message in the VM.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs.cid IS 'CID of the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs."from" IS 'Address of actor that sent the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs."to" IS 'Address of actor that received the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs.value IS 'The FIL value transferred (attoFIL) to the message receiver.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs.gas_fee_cap IS 'The maximum price that the message sender is willing to pay per unit of gas.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs.gas_premium IS 'The price per unit of gas (measured in attoFIL/gas) that the message sender is willing to pay (on top of the BaseFee) to "tip" the miner that will include this message in a block.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs.gas_limit IS 'A hard limit on the amount of gas (i.e., number of units of gas) that a message’s execution should be allowed to consume on chain. It is measured in units of gas.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs.size_bytes IS 'Size in bytes of the serialized message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs.nonce IS 'The message nonce, which protects against duplicate messages and multiple messages with the same values.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs.method IS 'The method number to invoke. Only unique to the actor the method is being invoked on. A method number of 0 is a plain token transfer - no method exectution.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs.state_root IS 'CID of the parent state root.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs.exit_code IS 'The exit code that was returned as a result of executing the message. Exit code 0 indicates success. Codes 0-15 are reserved for use by the runtime. Codes 16-31 are common codes shared by different actors. Codes 32+ are actor specific.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs.gas_used IS 'A measure of the amount of resources (or units of gas) consumed, in order to execute a message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs.parent_base_fee IS 'The set price per unit of gas (measured in attoFIL/gas unit) to be burned (sent to an unrecoverable address) for every message execution.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.derived_gas_outputs.base_fee_burn IS 'The amount of FIL (in attoFIL) to burn as a result of the base fee. It is parent_base_fee (or gas_fee_cap if smaller) multiplied by gas_used. 
+
+
+-- ----------------------------------------------------------------
+-- Name: drand_block_entries
+-- Model: blocks.DrandBlockEntrie
+-- Growth: About 4 rows per epoch
+-- ----------------------------------------------------------------
+CREATE TABLE {{ .SchemaName | default "public"}}.drand_block_entries (
+    round bigint NOT NULL,
+    block text NOT NULL
+);
+CREATE UNIQUE INDEX block_drand_entries_round_uindex ON {{ .SchemaName | default "public"}}.drand_block_entries USING btree (round, block);
+
+COMMENT ON TABLE {{ .SchemaName | default "public"}}.drand_block_entries IS 'Drand randomness round numbers used in each block.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.drand_block_entries.round IS 'The round number of the randomness used.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.drand_block_entries.block IS 'CID of the block.';
+
+-- ----------------------------------------------------------------
+-- Name: gopg_migrations
+-- Notes: This table and sequence can be created during version checking before a migration.
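+-- Example (illustrative only, not executed as part of this migration): the most
+-- recently applied migration can be inspected with a query along the lines of
+--
+--     SELECT version, created_at
+--     FROM {{ .SchemaName | default "public"}}.gopg_migrations
+--     ORDER BY id DESC
+--     LIMIT 1;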
+-- ----------------------------------------------------------------
+CREATE SEQUENCE IF NOT EXISTS {{ .SchemaName | default "public"}}.gopg_migrations_id_seq
+    AS integer
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+CREATE TABLE IF NOT EXISTS {{ .SchemaName | default "public"}}.gopg_migrations (
+    id integer NOT NULL,
+    version bigint,
+    created_at timestamp with time zone
+);
+ALTER SEQUENCE {{ .SchemaName | default "public"}}.gopg_migrations_id_seq OWNED BY {{ .SchemaName | default "public"}}.gopg_migrations.id;
+ALTER TABLE ONLY {{ .SchemaName | default "public"}}.gopg_migrations ALTER COLUMN id SET DEFAULT nextval('{{ .SchemaName | default "public"}}.gopg_migrations_id_seq'::regclass);
+
+
+-- ----------------------------------------------------------------
+-- Name: id_addresses
+-- Model: init.IdAddress
+-- Growth: About 1 row per epoch
+-- ----------------------------------------------------------------
+CREATE TABLE {{ .SchemaName | default "public"}}.id_addresses (
+    height bigint NOT NULL,
+    id text NOT NULL,
+    address text NOT NULL,
+    state_root text NOT NULL
+);
+ALTER TABLE ONLY {{ .SchemaName | default "public"}}.id_addresses ADD CONSTRAINT id_addresses_pkey PRIMARY KEY (height, id, address, state_root);
+
+COMMENT ON TABLE {{ .SchemaName | default "public"}}.id_addresses IS 'Mapping of IDs to robust addresses from the init actor''s state.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.id_addresses.height IS 'Epoch at which this address mapping was added.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.id_addresses.id IS 'ID of the actor.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.id_addresses.address IS 'Robust address of the actor.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.id_addresses.state_root IS 'CID of the parent state root at which this address mapping was added.';
+
+-- ----------------------------------------------------------------
+-- Name: internal_messages
+-- Model: messages.InternalMessage
+-- Growth: Estimate ~400 per epoch, roughly same as messages
+-- ----------------------------------------------------------------
+CREATE TABLE {{ .SchemaName | default "public"}}.internal_messages (
+    height bigint NOT NULL,
+    cid text NOT NULL,
+    state_root text NOT NULL,
+    source_message text,
+    "from" text NOT NULL,
+    "to" text NOT NULL,
+    value numeric NOT NULL,
+    method bigint NOT NULL,
+    actor_name text NOT NULL,
+    actor_family text NOT NULL,
+    exit_code bigint NOT NULL,
+    gas_used bigint NOT NULL
+);
+ALTER TABLE ONLY {{ .SchemaName | default "public"}}.internal_messages ADD CONSTRAINT internal_messages_pkey PRIMARY KEY (height, cid);
+CREATE INDEX internal_messages_exit_code_index ON {{ .SchemaName | default "public"}}.internal_messages USING btree (exit_code);
+CREATE INDEX internal_messages_from_index ON {{ .SchemaName | default "public"}}.internal_messages USING hash ("from");
+CREATE INDEX internal_messages_method_index ON {{ .SchemaName | default "public"}}.internal_messages USING btree (method);
+CREATE INDEX internal_messages_to_index ON {{ .SchemaName | default "public"}}.internal_messages USING hash ("to");
+CREATE INDEX internal_messages_actor_family_index ON {{ .SchemaName | default "public"}}.internal_messages USING btree ("actor_family");
+
+-- Convert internal_messages to a hypertable partitioned on height (time)
+-- Height chunked per week so we expect 20160*400 = ~8064000 rows per chunk, ~2.8GiB per chunk
+SELECT create_hypertable(
+    'internal_messages',
+    'height',
+
chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('internal_messages', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.internal_messages IS 'Messages generated implicitly by system actors and by using the runtime send method.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_messages.height IS 'Epoch this message was executed at.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_messages.cid IS 'CID of the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_messages.state_root IS 'CID of the parent state root at which this message was executed.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_messages.source_message IS 'CID of the message that caused this message to be sent.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_messages."from" IS 'Address of the actor that sent the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_messages."to" IS 'Address of the actor that received the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_messages.value IS 'Amount of FIL (in attoFIL) transferred by this message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_messages.method IS 'The method number invoked on the recipient actor. Only unique to the actor the method is being invoked on. A method number of 0 is a plain token transfer - no method exectution.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_messages.actor_name IS 'The full versioned name of the actor that received the message (for example fil/3/storagepower).'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_messages.actor_family IS 'The short unversioned name of the actor that received the message (for example storagepower).'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_messages.exit_code IS 'The exit code that was returned as a result of executing the message. Exit code 0 indicates success. Codes 0-15 are reserved for use by the runtime. Codes 16-31 are common codes shared by different actors. 
Codes 32+ are actor specific.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_messages.gas_used IS 'A measure of the amount of resources (or units of gas) consumed, in order to execute a message.'; + +-- ---------------------------------------------------------------- +-- Name: internal_parsed_messages +-- Model: messages.InternalParsedMessage +-- Growth: Estimate ~400 per epoch, roughly same as internal_messages +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.internal_parsed_messages ( + height bigint NOT NULL, + cid text NOT NULL, + "from" text NOT NULL, + "to" text NOT NULL, + value numeric NOT NULL, + method text NOT NULL, + params jsonb +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.internal_parsed_messages ADD CONSTRAINT internal_parsed_messages_pkey PRIMARY KEY (height, cid); +CREATE INDEX internal_parsed_messages_from_idx ON {{ .SchemaName | default "public"}}.internal_parsed_messages USING hash ("from"); +CREATE INDEX internal_parsed_messages_method_idx ON {{ .SchemaName | default "public"}}.internal_parsed_messages USING hash (method); +CREATE INDEX internal_parsed_messages_to_idx ON {{ .SchemaName | default "public"}}.internal_parsed_messages USING hash ("to"); + +-- Convert messages to a hypertable partitioned on height (time) +-- Height chunked per week so we expect 20160*400 = ~8064000 rows per chunk, ~2.8GiB per chunk +SELECT create_hypertable( + 'internal_parsed_messages', + 'height', + chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('internal_parsed_messages', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.internal_parsed_messages IS 'Internal messages parsed to extract useful information.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_parsed_messages.height IS 'Epoch this message was executed at.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_parsed_messages.cid IS 'CID of the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_parsed_messages."from" IS 'Address of the actor that sent the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_parsed_messages."to" IS 'Address of the actor that received the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_parsed_messages.value IS 'Amount of FIL (in attoFIL) transferred by this message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_parsed_messages.method IS 'The method number invoked on the recipient actor. Only unique to the actor the method is being invoked on. 
A method number of 0 is a plain token transfer - no method exectution.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.internal_parsed_messages.params IS 'Method parameters parsed and serialized as a JSON object.'; + + +-- ---------------------------------------------------------------- +-- Name: market_deal_proposals +-- Model: market.MarketDealProposal +-- Growth: About 2 rows per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.market_deal_proposals ( + deal_id bigint NOT NULL, + state_root text NOT NULL, + piece_cid text NOT NULL, + padded_piece_size bigint NOT NULL, + unpadded_piece_size bigint NOT NULL, + is_verified boolean NOT NULL, + client_id text NOT NULL, + provider_id text NOT NULL, + start_epoch bigint NOT NULL, + end_epoch bigint NOT NULL, + slashed_epoch bigint, + storage_price_per_epoch text NOT NULL, + provider_collateral text NOT NULL, + client_collateral text NOT NULL, + label text, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.market_deal_proposals ADD CONSTRAINT market_deal_proposals_pkey PRIMARY KEY (height, deal_id); +CREATE INDEX market_deal_proposals_height_idx ON {{ .SchemaName | default "public"}}.market_deal_proposals USING btree (height DESC); + +-- Convert market_deal_proposals to a hypertable partitioned on height (time) +-- Assume ~5 per epoch, ~350 bytes per table row +-- Height chunked per 7 days so we expect 20160*5 = ~100800 rows per chunk, 34MiB per chunk +SELECT create_hypertable( + 'market_deal_proposals', + 'height', + chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('market_deal_proposals', 'current_height', replace_if_exists => true); + + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.market_deal_proposals IS 'All storage deal states with latest values applied to end_epoch when updates are detected on-chain.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.deal_id IS 'Identifier for the deal.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.state_root IS 'CID of the parent state root for this deal.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.piece_cid IS 'CID of a sector piece. A Piece is an object that represents a whole or part of a File.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.padded_piece_size IS 'The piece size in bytes with padding.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.unpadded_piece_size IS 'The piece size in bytes without padding.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.is_verified IS 'Deal is with a verified provider.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.client_id IS 'Address of the actor proposing the deal.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.provider_id IS 'Address of the actor providing the services.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.start_epoch IS 'The epoch at which this deal with begin. 
Storage deal must appear in a sealed (proven) sector no later than start_epoch, otherwise it is invalid.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.end_epoch IS 'The epoch at which this deal with end.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.storage_price_per_epoch IS 'The amount of FIL (in attoFIL) that will be transferred from the client to the provider every epoch this deal is active for.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.provider_collateral IS 'The amount of FIL (in attoFIL) the provider has pledged as collateral. The Provider deal collateral is only slashed when a sector is terminated before the deal expires.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.client_collateral IS 'The amount of FIL (in attoFIL) the client has pledged as collateral.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.label IS 'An arbitrary client chosen label to apply to the deal.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_proposals.height IS 'Epoch at which this deal proposal was added or changed.'; + + +-- ---------------------------------------------------------------- +-- Name: market_deal_states +-- Model: market.MarketDealState +-- Growth: About 200 rows per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.market_deal_states ( + deal_id bigint NOT NULL, + sector_start_epoch bigint NOT NULL, + last_update_epoch bigint NOT NULL, + slash_epoch bigint NOT NULL, + state_root text NOT NULL, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.market_deal_states ADD CONSTRAINT market_deal_states_pkey PRIMARY KEY (height, deal_id, state_root); +CREATE INDEX market_deal_states_height_idx ON {{ .SchemaName | default "public"}}.market_deal_states USING btree (height DESC); + +-- Convert market_deal_states to a hypertable partitioned on height (time) +-- Assume ~200 per epoch, ~150 bytes per table row +-- Height chunked per 7 days so we expect 20160*200 = ~4032000 rows per chunk, ~576MiB per chunk +SELECT create_hypertable( + 'market_deal_states', + 'height', + chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('market_deal_states', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.market_deal_states IS 'All storage deal state transitions detected on-chain.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_states.deal_id IS 'Identifier for the deal.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_states.sector_start_epoch IS 'Epoch this deal was included in a proven sector. -1 if not yet included in proven sector.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_states.last_update_epoch IS 'Epoch this deal was last updated at. -1 if deal state never updated.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_states.slash_epoch IS 'Epoch this deal was slashed at. 
-1 if deal was never slashed.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_states.state_root IS 'CID of the parent state root for this deal.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.market_deal_states.height IS 'Epoch at which this deal was added or changed.'; + +-- ---------------------------------------------------------------- +-- Name: message_gas_economy +-- Model: messages.MessageGasEconomy +-- Growth: One row per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.message_gas_economy ( + state_root text NOT NULL, + gas_limit_total numeric NOT NULL, + gas_limit_unique_total numeric, + base_fee numeric NOT NULL, + base_fee_change_log double precision NOT NULL, + gas_fill_ratio double precision, + gas_capacity_ratio double precision, + gas_waste_ratio double precision, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.message_gas_economy ADD CONSTRAINT message_gas_economy_pkey PRIMARY KEY (height, state_root); +CREATE INDEX message_gas_economy_height_idx ON {{ .SchemaName | default "public"}}.message_gas_economy USING btree (height DESC); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.message_gas_economy IS 'Gas economics for all messages in all blocks at each epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.message_gas_economy.state_root IS 'CID of the parent state root at this epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.message_gas_economy.gas_limit_total IS 'The sum of all the gas limits.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.message_gas_economy.gas_limit_unique_total IS 'The sum of all the gas limits of unique messages.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.message_gas_economy.base_fee IS 'The set price per unit of gas (measured in attoFIL/gas unit) to be burned (sent to an unrecoverable address) for every message execution.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.message_gas_economy.base_fee_change_log IS 'The logarithm of the change between new and old base fee.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.message_gas_economy.gas_fill_ratio IS 'The gas_limit_total / target gas limit total for all blocks.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.message_gas_economy.gas_capacity_ratio IS 'The gas_limit_unique_total / target gas limit total for all blocks.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.message_gas_economy.gas_waste_ratio IS '(gas_limit_total - gas_limit_unique_total) / target gas limit total for all blocks.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.message_gas_economy.height IS 'Epoch these economics apply to.'; + + +-- ---------------------------------------------------------------- +-- Name: messages +-- Model: messages.Message +-- Growth: About 400 rows per epoch +-- Notes: This was chunked daily in v0, now converted to weekly +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.messages ( + cid text NOT NULL, + "from" text NOT NULL, + "to" text NOT NULL, + size_bytes bigint NOT NULL, + nonce bigint NOT NULL, + value numeric NOT NULL, + gas_fee_cap numeric NOT NULL, + gas_premium numeric NOT NULL, + gas_limit bigint NOT NULL, + method bigint, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.messages ADD CONSTRAINT messages_pkey PRIMARY KEY (height, cid); +CREATE INDEX 
messages_from_index ON {{ .SchemaName | default "public"}}.messages USING btree ("from"); +CREATE INDEX messages_height_idx ON {{ .SchemaName | default "public"}}.messages USING btree (height DESC); +CREATE INDEX messages_to_index ON {{ .SchemaName | default "public"}}.messages USING btree ("to"); + +-- Convert messages to a hypertable partitioned on height (time) +-- Assume ~400 messages per epoch, ~373 bytes per table row (not including toast) +-- Height chunked per week so we expect 20160*400 = ~8064000 rows per chunk, ~2.8GiB per chunk +SELECT create_hypertable( + 'messages', + 'height', + chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('messages', 'current_height', replace_if_exists => true); + + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.messages IS 'Validated on-chain messages by their CID and their metadata.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.messages.cid IS 'CID of the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.messages."from" IS 'Address of the actor that sent the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.messages."to" IS 'Address of the actor that received the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.messages.size_bytes IS 'Size of the serialized message in bytes.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.messages.nonce IS 'The message nonce, which protects against duplicate messages and multiple messages with the same values.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.messages.value IS 'Amount of FIL (in attoFIL) transferred by this message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.messages.gas_fee_cap IS 'The maximum price that the message sender is willing to pay per unit of gas.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.messages.gas_premium IS 'The price per unit of gas (measured in attoFIL/gas) that the message sender is willing to pay (on top of the BaseFee) to "tip" the miner that will include this message in a block.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.messages.method IS 'The method number invoked on the recipient actor. Only unique to the actor the method is being invoked on. 
A method number of 0 is a plain token transfer - no method exectution.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.messages.height IS 'Epoch this message was executed at.'; + +-- ---------------------------------------------------------------- +-- Name: miner_current_deadline_infos +-- Model: miner.MinerCurrentDeadlineInfo +-- Growth: About 1200 rows per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.miner_current_deadline_infos ( + height bigint NOT NULL, + miner_id text NOT NULL, + state_root text NOT NULL, + deadline_index bigint NOT NULL, + period_start bigint NOT NULL, + open bigint NOT NULL, + close bigint NOT NULL, + challenge bigint NOT NULL, + fault_cutoff bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.miner_current_deadline_infos ADD CONSTRAINT miner_current_deadline_infos_pkey PRIMARY KEY (height, miner_id, state_root); +CREATE INDEX miner_current_deadline_infos_height_idx ON {{ .SchemaName | default "public"}}.miner_current_deadline_infos USING btree (height DESC); + +SELECT create_hypertable( + 'miner_current_deadline_infos', + 'height', + chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('miner_current_deadline_infos', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.miner_current_deadline_infos IS 'Deadline refers to the window during which proofs may be submitted.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_current_deadline_infos.height IS 'Epoch at which this info was calculated.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_current_deadline_infos.miner_id IS 'Address of the miner this info relates to.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_current_deadline_infos.state_root IS 'CID of the parent state root at this epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_current_deadline_infos.deadline_index IS 'A deadline index, in [0..d.WPoStProvingPeriodDeadlines) unless period elapsed.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_current_deadline_infos.period_start IS 'First epoch of the proving period (<= CurrentEpoch).'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_current_deadline_infos.open IS 'First epoch from which a proof may be submitted (>= CurrentEpoch).'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_current_deadline_infos.close IS 'First epoch from which a proof may no longer be submitted (>= Open).'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_current_deadline_infos.challenge IS 'Epoch at which to sample the chain for challenge (< Open).'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_current_deadline_infos.fault_cutoff IS 'First epoch at which a fault declaration is rejected (< Open).'; + + +-- ---------------------------------------------------------------- +-- Name: miner_fee_debts +-- Model: miner.MinerFeeDebt +-- Growth: About 1200 rows per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.miner_fee_debts ( + height bigint NOT NULL, + miner_id text NOT NULL, + state_root text NOT NULL, + fee_debt numeric NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.miner_fee_debts ADD CONSTRAINT miner_fee_debts_pkey PRIMARY KEY (height, miner_id, state_root); +CREATE INDEX miner_fee_debts_height_idx ON {{ .SchemaName | 
default "public"}}.miner_fee_debts USING btree (height DESC); + +SELECT create_hypertable( + 'miner_fee_debts', + 'height', + chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('miner_fee_debts', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.miner_fee_debts IS 'Miner debts per epoch from unpaid fees.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_fee_debts.height IS 'Epoch at which this debt applies.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_fee_debts.miner_id IS 'Address of the miner that owes fees.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_fee_debts.state_root IS 'CID of the parent state root at this epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_fee_debts.fee_debt IS 'Absolute value of debt this miner owes from unpaid fees in attoFIL.'; + +-- ---------------------------------------------------------------- +-- Name: miner_infos +-- Model: miner.MinerInfo +-- Growth: Less than one per epoch +-- Notes: This was a hypertable in v0, removed due to low rate of growth +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.miner_infos ( + height bigint NOT NULL, + miner_id text NOT NULL, + state_root text NOT NULL, + owner_id text NOT NULL, + worker_id text NOT NULL, + new_worker text, + worker_change_epoch bigint NOT NULL, + consensus_faulted_elapsed bigint NOT NULL, + peer_id text, + control_addresses jsonb, + multi_addresses jsonb, + sector_size bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.miner_infos ADD CONSTRAINT miner_infos_pkey PRIMARY KEY (height, miner_id, state_root); +CREATE INDEX miner_infos_height_idx ON {{ .SchemaName | default "public"}}.miner_infos USING btree (height DESC); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.miner_infos IS 'Miner Account IDs for all associated addresses plus peer ID. See https://docs.filecoin.io/mine/lotus/miner-addresses/ for more information.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_infos.height IS 'Epoch at which this miner info was added/changed.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_infos.miner_id IS 'Address of miner this info applies to.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_infos.state_root IS 'CID of the parent state root at this epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_infos.owner_id IS 'Address of actor designated as the owner. The owner address is the address that created the miner, paid the collateral, and has block rewards paid out to it.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_infos.worker_id IS 'Address of actor designated as the worker. 
The worker is responsible for doing all of the work, submitting proofs, committing new sectors, and all other day to day activities.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_infos.new_worker IS 'Address of a new worker address that will become effective at worker_change_epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_infos.worker_change_epoch IS 'Epoch at which a new_worker address will become effective.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_infos.consensus_faulted_elapsed IS 'The next epoch this miner is eligible for certain permissioned actor methods and winning block elections as a result of being reported for a consensus fault.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_infos.peer_id IS 'Current libp2p Peer ID of the miner.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_infos.control_addresses IS 'JSON array of control addresses. Control addresses are used to submit WindowPoSts proofs to the chain. WindowPoSt is the mechanism through which storage is verified in Filecoin and is required by miners to submit proofs for all sectors every 24 hours. Those proofs are submitted as messages to the blockchain and therefore need to pay the respective fees.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_infos.multi_addresses IS 'JSON array of multiaddrs at which this miner can be reached.'; + + +-- ---------------------------------------------------------------- +-- Name: miner_locked_funds +-- Model: miner.MinerLockedFund +-- Growth: About 1200 per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.miner_locked_funds ( + height bigint NOT NULL, + miner_id text NOT NULL, + state_root text NOT NULL, + locked_funds numeric NOT NULL, + initial_pledge numeric NOT NULL, + pre_commit_deposits numeric NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.miner_locked_funds ADD CONSTRAINT miner_locked_funds_pkey PRIMARY KEY (height, miner_id, state_root); +CREATE INDEX miner_locked_funds_height_idx ON {{ .SchemaName | default "public"}}.miner_locked_funds USING btree (height DESC); + +SELECT create_hypertable( + 'miner_locked_funds', + 'height', + chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('miner_locked_funds', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.miner_locked_funds IS 'Details of Miner funds locked and unavailable for use.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_locked_funds.height IS 'Epoch at which these details were added/changed.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_locked_funds.miner_id IS 'Address of the miner these details apply to.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_locked_funds.state_root IS 'CID of the parent state root at this epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_locked_funds.locked_funds IS 'Amount of FIL (in attoFIL) locked due to vesting. When a Miner receives tokens from block rewards, the tokens are locked and added to the Miner''s vesting table to be unlocked linearly over some future epochs.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_locked_funds.initial_pledge IS 'Amount of FIL (in attoFIL) locked due to it being pledged as collateral. 
When a Miner ProveCommits a Sector, they must supply an "initial pledge" for the Sector, which acts as collateral. If the Sector is terminated, this deposit is removed and burned along with rewards earned by this sector up to a limit.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_locked_funds.pre_commit_deposits IS 'Amount of FIL (in attoFIL) locked due to it being used as a PreCommit deposit. When a Miner PreCommits a Sector, they must supply a "precommit deposit" for the Sector, which acts as collateral. If the Sector is not ProveCommitted on time, this deposit is removed and burned.'; + + +-- ---------------------------------------------------------------- +-- Name: miner_pre_commit_infos +-- Model: MinerPreCommitInfo +-- Growth: About 180 per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.miner_pre_commit_infos ( + miner_id text NOT NULL, + sector_id bigint NOT NULL, + state_root text NOT NULL, + sealed_cid text NOT NULL, + seal_rand_epoch bigint, + expiration_epoch bigint, + pre_commit_deposit numeric NOT NULL, + pre_commit_epoch bigint, + deal_weight numeric NOT NULL, + verified_deal_weight numeric NOT NULL, + is_replace_capacity boolean, + replace_sector_deadline bigint, + replace_sector_partition bigint, + replace_sector_number bigint, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.miner_pre_commit_infos ADD CONSTRAINT miner_pre_commit_infos_pkey PRIMARY KEY (height, miner_id, sector_id, state_root); +CREATE INDEX miner_pre_commit_infos_height_idx ON {{ .SchemaName | default "public"}}.miner_pre_commit_infos USING btree (height DESC); + +-- Convert miner_pre_commit_infos to a hypertable partitioned on height (time) +-- Assume ~5 per epoch, ~300 bytes per table row +-- Height chunked per 7 days so we expect 20160*5 = ~100800 rows per chunk, ~28MiB per chunk +SELECT create_hypertable( + 'miner_pre_commit_infos', + 'height', + chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('miner_pre_commit_infos', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.miner_pre_commit_infos IS 'Information on sector PreCommits.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.miner_id IS 'Address of the miner who owns the sector.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.sector_id IS 'Numeric identifier for the sector.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.state_root IS 'CID of the parent state root at this epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.sealed_cid IS 'CID of the sealed sector.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.seal_rand_epoch IS 'Seal challenge epoch. Epoch at which randomness should be drawn to tie Proof-of-Replication to a chain.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.expiration_epoch IS 'Epoch this sector expires.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.pre_commit_deposit IS 'Amount of FIL (in attoFIL) used as a PreCommit deposit. 
If the Sector is not ProveCommitted on time, this deposit is removed and burned.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.pre_commit_epoch IS 'Epoch this PreCommit was created.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.deal_weight IS 'Total space*time of submitted deals.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.verified_deal_weight IS 'Total space*time of submitted verified deals.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.is_replace_capacity IS 'Whether to replace a "committed capacity" no-deal sector (requires non-empty DealIDs).'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.replace_sector_deadline IS 'The deadline location of the sector to replace.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.replace_sector_partition IS 'The partition location of the sector to replace.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.replace_sector_number IS 'ID of the committed capacity sector to replace.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_pre_commit_infos.height IS 'Epoch this PreCommit information was added/changed.'; + +-- ---------------------------------------------------------------- +-- Name: miner_sector_deals +-- Model: MinerSectorDeal +-- Notes: This was a hypertable in v0, removed due to low rate of growth +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.miner_sector_deals ( + miner_id text NOT NULL, + sector_id bigint NOT NULL, + deal_id bigint NOT NULL, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.miner_sector_deals ADD CONSTRAINT miner_sector_deals_pkey PRIMARY KEY (height, miner_id, sector_id, deal_id); +CREATE INDEX miner_deal_sectors_height_idx ON {{ .SchemaName | default "public"}}.miner_sector_deals USING btree (height DESC); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.miner_sector_deals IS 'Mapping of Deal IDs to their respective Miner and Sector IDs.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_deals.miner_id IS 'Address of the miner the deal is with.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_deals.sector_id IS 'Numeric identifier of the sector the deal is for.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_deals.deal_id IS 'Numeric identifier for the deal.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_deals.height IS 'Epoch at which this deal was added/updated.'; + + +-- ---------------------------------------------------------------- +-- Name: miner_sector_events +-- Model: miner.MinerSectorEvent +-- Growth: About 670 per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.miner_sector_events ( + miner_id text NOT NULL, + sector_id bigint NOT NULL, + state_root text NOT NULL, + event {{ .SchemaName | default "public"}}.miner_sector_event_type NOT NULL, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.miner_sector_events ADD CONSTRAINT miner_sector_events_pkey PRIMARY KEY (height, sector_id, event, miner_id, state_root); +CREATE INDEX miner_sector_events_height_idx ON {{ .SchemaName | default "public"}}.miner_sector_events USING btree (height DESC); + +-- Convert miner_sector_events 
to a hypertable partitioned on height (time)
+-- Assume ~670 per epoch, ~300 bytes per table row
+-- Height chunked per 7 days so we expect 20160*670 = ~13507200 rows per chunk, ~3.8GiB per chunk
+SELECT create_hypertable(
+    'miner_sector_events',
+    'height',
+    chunk_time_interval => 20160,
+    if_not_exists => TRUE
+);
+SELECT set_integer_now_func('miner_sector_events', 'current_height', replace_if_exists => true);
+
+COMMENT ON TABLE {{ .SchemaName | default "public"}}.miner_sector_events IS 'Sector events on-chain per Miner/Sector.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_events.miner_id IS 'Address of the miner who owns the sector.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_events.sector_id IS 'Numeric identifier of the sector.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_events.state_root IS 'CID of the parent state root at this epoch.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_events.event IS 'Name of the event that occurred.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_events.height IS 'Epoch at which this event occurred.';
+
+
+-- ----------------------------------------------------------------
+-- Name: miner_sector_infos
+-- Model: miner.MinerSectorInfo
+-- Growth: About 180 per epoch
+-- ----------------------------------------------------------------
+CREATE TABLE {{ .SchemaName | default "public"}}.miner_sector_infos (
+    miner_id text NOT NULL,
+    sector_id bigint NOT NULL,
+    state_root text NOT NULL,
+    sealed_cid text NOT NULL,
+    activation_epoch bigint,
+    expiration_epoch bigint,
+    deal_weight numeric NOT NULL,
+    verified_deal_weight numeric NOT NULL,
+    initial_pledge numeric NOT NULL,
+    expected_day_reward numeric NOT NULL,
+    expected_storage_pledge numeric NOT NULL,
+    height bigint NOT NULL
+);
+ALTER TABLE ONLY {{ .SchemaName | default "public"}}.miner_sector_infos ADD CONSTRAINT miner_sector_infos_pkey PRIMARY KEY (height, miner_id, sector_id, state_root);
+CREATE INDEX miner_sector_infos_height_idx ON {{ .SchemaName | default "public"}}.miner_sector_infos USING btree (height DESC);
+
+-- Convert miner_sector_infos to a hypertable partitioned on height (time)
+-- Assume ~180 per epoch, ~300 bytes per table row
+-- Height chunked per 7 days so we expect 20160*180 = ~3628800 rows per chunk, ~1GiB per chunk
+SELECT create_hypertable(
+    'miner_sector_infos',
+    'height',
+    chunk_time_interval => 20160,
+    if_not_exists => TRUE
+);
+SELECT set_integer_now_func('miner_sector_infos', 'current_height', replace_if_exists => true);
+
+COMMENT ON TABLE {{ .SchemaName | default "public"}}.miner_sector_infos IS 'Latest state of sectors by Miner.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_infos.miner_id IS 'Address of the miner who owns the sector.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_infos.sector_id IS 'Numeric identifier of the sector.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_infos.state_root IS 'CID of the parent state root at this epoch.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_infos.sealed_cid IS 'The root CID of the Sealed Sector’s merkle tree. Also called CommR, or "replica commitment".';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_infos.activation_epoch IS 'Epoch during which the sector proof was accepted.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_infos.expiration_epoch IS 'Epoch during which the sector expires.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_infos.deal_weight IS 'Integral of active deals over sector lifetime.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_infos.verified_deal_weight IS 'Integral of active verified deals over sector lifetime.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_infos.initial_pledge IS 'Pledge collected to commit this sector (in attoFIL).';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_infos.expected_day_reward IS 'Expected one day projection of reward for sector computed at activation time (in attoFIL).';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_infos.expected_storage_pledge IS 'Expected twenty day projection of reward for sector computed at activation time (in attoFIL).';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_infos.height IS 'Epoch at which this sector info was added/updated.';
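+
+-- Example (illustrative only, not executed as part of this migration): the most
+-- recently observed state of a given miner's sectors can be inspected with a
+-- query along the lines of
+--
+--     SELECT DISTINCT ON (sector_id) sector_id, activation_epoch, expiration_epoch, initial_pledge
+--     FROM {{ .SchemaName | default "public"}}.miner_sector_infos
+--     WHERE miner_id = 'f01234'
+--     ORDER BY sector_id, height DESC;
+--
+-- (miner_id 'f01234' is a placeholder address.)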
+
+-- ----------------------------------------------------------------
+-- Name: miner_sector_posts
+-- Model: miner.MinerSectorPost
+-- Growth: About 9000 per epoch
+-- Notes: This was chunked per 7 days in v0
+-- ----------------------------------------------------------------
+CREATE TABLE {{ .SchemaName | default "public"}}.miner_sector_posts (
+    miner_id text NOT NULL,
+    sector_id bigint NOT NULL,
+    height bigint NOT NULL,
+    post_message_cid text
+);
+ALTER TABLE ONLY {{ .SchemaName | default "public"}}.miner_sector_posts ADD CONSTRAINT miner_sector_posts_pkey PRIMARY KEY (height, miner_id, sector_id);
+CREATE INDEX miner_sector_posts_height_idx ON {{ .SchemaName | default "public"}}.miner_sector_posts USING btree (height DESC);
+
+-- Convert miner_sector_posts to a hypertable partitioned on height (time)
+-- Assume ~9000 per epoch, ~150 bytes per table row
+-- Height chunked per day so we expect 2880*9000 = ~25920000 rows per chunk, ~3.7GiB per chunk
+SELECT create_hypertable(
+    'miner_sector_posts',
+    'height',
+    chunk_time_interval => 2880,
+    if_not_exists => TRUE
+);
+SELECT set_integer_now_func('miner_sector_posts', 'current_height', replace_if_exists => true);
+
+COMMENT ON TABLE {{ .SchemaName | default "public"}}.miner_sector_posts IS 'Proof of Spacetime for sectors.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_posts.miner_id IS 'Address of the miner who owns the sector.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_posts.sector_id IS 'Numeric identifier of the sector.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_posts.height IS 'Epoch at which this PoSt message was executed.';
+COMMENT ON COLUMN {{ .SchemaName | default "public"}}.miner_sector_posts.post_message_cid IS 'CID of the PoSt message.';
+
+
+-- ----------------------------------------------------------------
+-- Name: multisig_approvals
+-- Model: msapprovals.MultisigApproval
+-- Notes: This was a hypertable in v0, removed due to low rate of growth
+-- ----------------------------------------------------------------
+CREATE TABLE {{ .SchemaName | default "public"}}.multisig_approvals (
+    height bigint NOT NULL,
+    state_root text NOT NULL,
+    multisig_id text NOT NULL,
+ message text NOT NULL, + method bigint NOT NULL, + approver text NOT NULL, + threshold bigint NOT NULL, + initial_balance numeric NOT NULL, + gas_used bigint NOT NULL, + transaction_id bigint NOT NULL, + "to" text NOT NULL, + value numeric NOT NULL, + signers jsonb NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.multisig_approvals ADD CONSTRAINT multisig_approvals_pkey PRIMARY KEY (height, state_root, multisig_id, message, approver); +CREATE INDEX multisig_approvals_height_idx ON {{ .SchemaName | default "public"}}.multisig_approvals USING btree (height DESC); + +-- ---------------------------------------------------------------- +-- Name: multisig_transactions +-- Model: MultisigTransaction +-- Growth: Less than 1 per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.multisig_transactions ( + height bigint NOT NULL, + multisig_id text NOT NULL, + state_root text NOT NULL, + transaction_id bigint NOT NULL, + "to" text NOT NULL, + value text NOT NULL, + method bigint NOT NULL, + params bytea, + approved jsonb NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.multisig_transactions ADD CONSTRAINT multisig_transactions_pkey PRIMARY KEY (height, state_root, multisig_id, transaction_id); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.multisig_transactions IS 'Details of pending transactions involving multisig actors.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.multisig_transactions.height IS 'Epoch at which this transaction was executed.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.multisig_transactions.multisig_id IS 'Address of the multisig actor involved in the transaction.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.multisig_transactions.state_root IS 'CID of the parent state root at this epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.multisig_transactions.transaction_id IS 'Number identifier for the transaction - unique per multisig.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.multisig_transactions."to" IS 'Address of the recipient who will be sent a message if the proposal is approved.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.multisig_transactions.value IS 'Amount of FIL (in attoFIL) that will be transferred if the proposal is approved.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.multisig_transactions.method IS 'The method number to invoke on the recipient if the proposal is approved. Only unique to the actor the method is being invoked on. A method number of 0 is a plain token transfer - no method exectution.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.multisig_transactions.params IS 'CBOR encoded bytes of parameters to send to the method that will be invoked if the proposal is approved.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.multisig_transactions.approved IS 'Addresses of signers who have approved the transaction. 
0th entry is the proposer.'; + + +-- ---------------------------------------------------------------- +-- Name: parsed_messages +-- Model: messages.ParsedMessage +-- Growth: About 400 per epoch +-- Notes: More accurate chunk size calculation based on actual row sizes +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.parsed_messages ( + cid text NOT NULL, + height bigint NOT NULL, + "from" text NOT NULL, + "to" text NOT NULL, + value numeric NOT NULL, + method text NOT NULL, + params jsonb +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.parsed_messages ADD CONSTRAINT parsed_messages_pkey PRIMARY KEY (height, cid); +CREATE INDEX parsed_messages_height_idx ON {{ .SchemaName | default "public"}}.parsed_messages USING btree (height DESC); +CREATE INDEX message_parsed_from_idx ON {{ .SchemaName | default "public"}}.parsed_messages USING hash ("from"); +CREATE INDEX message_parsed_method_idx ON {{ .SchemaName | default "public"}}.parsed_messages USING hash (method); +CREATE INDEX message_parsed_to_idx ON {{ .SchemaName | default "public"}}.parsed_messages USING hash ("to"); + +-- Convert messages to a hypertable partitioned on height (time) +-- Assume ~400 messages per epoch, ~2500 bytes per table row +-- Height chunked per day so we expect 2880*400 = ~1152000 rows per chunk, ~2.7GiB per chunk +SELECT create_hypertable( + 'parsed_messages', + 'height', + chunk_time_interval => 2880, + if_not_exists => TRUE +); +SELECT set_integer_now_func('parsed_messages', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.parsed_messages IS 'Messages parsed to extract useful information.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.parsed_messages.cid IS 'CID of the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.parsed_messages.height IS 'Epoch this message was executed at.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.parsed_messages."from" IS 'Address of the actor that sent the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.parsed_messages."to" IS 'Address of the actor that received the message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.parsed_messages.value IS 'Amount of FIL (in attoFIL) transferred by this message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.parsed_messages.method IS 'The name of the method that was invoked on the recipient actor.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.parsed_messages.params IS 'Method parameters parsed and serialized as a JSON object.'; + + +-- ---------------------------------------------------------------- +-- Name: power_actor_claims +-- Model: power.PowerActorClaim +-- Growth: About 7 rows per epoch +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.power_actor_claims ( + height bigint NOT NULL, + miner_id text NOT NULL, + state_root text NOT NULL, + raw_byte_power numeric NOT NULL, + quality_adj_power numeric NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.power_actor_claims ADD CONSTRAINT power_actor_claims_pkey PRIMARY KEY (height, miner_id, state_root); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.power_actor_claims IS 'Miner power claims recorded by the power actor.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.power_actor_claims.height IS 'Epoch this claim was made.'; +COMMENT ON COLUMN {{ .SchemaName | default 
"public"}}.power_actor_claims.miner_id IS 'Address of miner making the claim.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.power_actor_claims.state_root IS 'CID of the parent state root at this epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.power_actor_claims.raw_byte_power IS 'Sum of raw byte storage power for a miner''s sectors. Raw byte power is the size of a sector in bytes.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.power_actor_claims.quality_adj_power IS 'Sum of quality adjusted storage power for a miner''s sectors. Quality adjusted power is a weighted average of the quality of its space and it is based on the size, duration and quality of its deals.'; + + +-- ---------------------------------------------------------------- +-- Name: receipts +-- Model: messages.Receipt +-- Growth: About 400 per epoch +-- Notes: This was chunked daily in v0, now converted to weekly +-- ---------------------------------------------------------------- +CREATE TABLE {{ .SchemaName | default "public"}}.receipts ( + message text NOT NULL, + state_root text NOT NULL, + idx bigint NOT NULL, + exit_code bigint NOT NULL, + gas_used bigint NOT NULL, + height bigint NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.receipts ADD CONSTRAINT receipts_pkey PRIMARY KEY (height, message, state_root); +CREATE INDEX receipts_height_idx ON {{ .SchemaName | default "public"}}.receipts USING btree (height DESC); + +-- Convert receipts to a hypertable partitioned on height (time) +-- Assume ~400 receipts per epoch, ~215 bytes per table row +-- Height chunked per day so we expect 20160*250 = ~8064000 rows per chunk, ~1.6GiB per chunk +SELECT create_hypertable( + 'receipts', + 'height', + chunk_time_interval => 20160, + if_not_exists => TRUE +); +SELECT set_integer_now_func('receipts', 'current_height', replace_if_exists => true); + +COMMENT ON TABLE {{ .SchemaName | default "public"}}.receipts IS 'Message reciepts after being applied to chain state by message CID and parent state root CID of tipset when message was executed.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.receipts.message IS 'CID of the message this receipt belongs to.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.receipts.state_root IS 'CID of the parent state root that this epoch.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.receipts.idx IS 'Index of message indicating execution order.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.receipts.exit_code IS 'The exit code that was returned as a result of executing the message. Exit code 0 indicates success. Codes 0-15 are reserved for use by the runtime. Codes 16-31 are common codes shared by different actors. 
Codes 32+ are actor specific.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.receipts.gas_used IS 'A measure of the amount of resources (or units of gas) consumed, in order to execute a message.'; +COMMENT ON COLUMN {{ .SchemaName | default "public"}}.receipts.height IS 'Epoch the message was executed and receipt generated.'; + + +-- ---------------------------------------------------------------- +-- Name: visor_processing_reports +-- Model: visor.ProcessingReport +-- Growth: About 8 per epoch +-- ---------------------------------------------------------------- + +CREATE TABLE {{ .SchemaName | default "public"}}.visor_processing_reports ( + height bigint NOT NULL, + state_root text NOT NULL, + reporter text NOT NULL, + task text NOT NULL, + started_at timestamp with time zone NOT NULL, + completed_at timestamp with time zone NOT NULL, + status text, + status_information text, + errors_detected jsonb +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.visor_processing_reports ADD CONSTRAINT visor_processing_reports_pkey PRIMARY KEY (height, state_root, reporter, task, started_at); + + +-- ---------------------------------------------------------------- +-- Name: visor_version +-- Notes: This table can be created during version checking before a migration. +-- ---------------------------------------------------------------- + +CREATE TABLE IF NOT EXISTS {{ .SchemaName | default "public"}}.visor_version ( + major integer NOT NULL +); +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.visor_version DROP CONSTRAINT IF EXISTS visor_version_pkey; +ALTER TABLE ONLY {{ .SchemaName | default "public"}}.visor_version ADD CONSTRAINT visor_version_pkey PRIMARY KEY (major); +INSERT INTO {{ .SchemaName | default "public"}}.visor_version (major) VALUES (1); + + +-- ===================================================================================================================== +-- VIEWS +-- ===================================================================================================================== + +-- +-- Name: chain_visualizer_blocks_view +-- + +CREATE VIEW {{ .SchemaName | default "public"}}.chain_visualizer_blocks_view AS + SELECT block_headers.cid, + block_headers.parent_weight, + block_headers.parent_state_root, + block_headers.height, + block_headers.miner, + block_headers."timestamp", + block_headers.win_count, + block_headers.parent_base_fee, + block_headers.fork_signaling + FROM {{ .SchemaName | default "public"}}.block_headers; + + +-- +-- Name: chain_visualizer_blocks_with_parents_view +-- + +CREATE VIEW {{ .SchemaName | default "public"}}.chain_visualizer_blocks_with_parents_view AS + SELECT block_parents.block, + block_parents.parent, + b.miner, + b.height, + b."timestamp" + FROM ({{ .SchemaName | default "public"}}.block_parents + JOIN {{ .SchemaName | default "public"}}.block_headers b ON ((block_parents.block = b.cid))); + +-- +-- Name: chain_visualizer_chain_data_view; Type: VIEW; Schema: public; Owner: postgres +-- + +CREATE VIEW {{ .SchemaName | default "public"}}.chain_visualizer_chain_data_view AS + SELECT main_block.cid AS block, + bp.parent, + main_block.miner, + main_block.height, + main_block.parent_weight AS parentweight, + main_block."timestamp", + main_block.parent_state_root AS parentstateroot, + parent_block."timestamp" AS parenttimestamp, + parent_block.height AS parentheight, + pac.raw_byte_power AS parentpower, + main_block."timestamp" AS syncedtimestamp, + ( SELECT count(*) AS count + FROM {{ .SchemaName | default 
"public"}}.block_messages + WHERE (block_messages.block = main_block.cid)) AS messages + FROM ((({{ .SchemaName | default "public"}}.block_headers main_block + LEFT JOIN {{ .SchemaName | default "public"}}.block_parents bp ON ((bp.block = main_block.cid))) + LEFT JOIN {{ .SchemaName | default "public"}}.block_headers parent_block ON ((parent_block.cid = bp.parent))) + LEFT JOIN {{ .SchemaName | default "public"}}.power_actor_claims pac ON ((main_block.parent_state_root = pac.state_root))); + +-- +-- Name: chain_visualizer_orphans_view; Type: VIEW; Schema: public; Owner: postgres +-- + +CREATE VIEW {{ .SchemaName | default "public"}}.chain_visualizer_orphans_view AS + SELECT block_headers.cid AS block, + block_headers.miner, + block_headers.height, + block_headers.parent_weight AS parentweight, + block_headers."timestamp", + block_headers.parent_state_root AS parentstateroot, + block_parents.parent + FROM ({{ .SchemaName | default "public"}}.block_headers + LEFT JOIN {{ .SchemaName | default "public"}}.block_parents ON ((block_headers.cid = block_parents.parent))) + WHERE (block_parents.block IS NULL); + +-- +-- Name: derived_consensus_chain_view; Type: MATERIALIZED VIEW; Schema: public; Owner: postgres +-- + +CREATE MATERIALIZED VIEW {{ .SchemaName | default "public"}}.derived_consensus_chain_view AS + WITH RECURSIVE consensus_chain AS ( + SELECT b.cid, + b.height, + b.miner, + b."timestamp", + b.parent_state_root, + b.win_count + FROM {{ .SchemaName | default "public"}}.block_headers b + WHERE (b.parent_state_root = ( SELECT block_headers.parent_state_root + FROM {{ .SchemaName | default "public"}}.block_headers + ORDER BY block_headers.height DESC, block_headers.parent_weight DESC + LIMIT 1)) + UNION + SELECT p.cid, + p.height, + p.miner, + p."timestamp", + p.parent_state_root, + p.win_count + FROM (({{ .SchemaName | default "public"}}.block_headers p + JOIN {{ .SchemaName | default "public"}}.block_parents pb ON ((p.cid = pb.parent))) + JOIN consensus_chain c ON ((c.cid = pb.block))) + ) + SELECT consensus_chain.cid, + consensus_chain.height, + consensus_chain.miner, + consensus_chain."timestamp", + consensus_chain.parent_state_root, + consensus_chain.win_count + FROM consensus_chain + WITH NO DATA; + + +-- +-- Name: state_heights; Type: MATERIALIZED VIEW; Schema: public; Owner: postgres +-- + +CREATE MATERIALIZED VIEW {{ .SchemaName | default "public"}}.state_heights AS + SELECT DISTINCT block_headers.height, + block_headers.parent_state_root AS parentstateroot + FROM {{ .SchemaName | default "public"}}.block_headers + WITH NO DATA; +CREATE INDEX state_heights_height_index ON {{ .SchemaName | default "public"}}.state_heights USING btree (height); +CREATE INDEX state_heights_parentstateroot_index ON {{ .SchemaName | default "public"}}.state_heights USING btree (parentstateroot); + +` diff --git a/schemas/version.go b/schemas/version.go index 3042ac726..912617ab3 100644 --- a/schemas/version.go +++ b/schemas/version.go @@ -7,3 +7,7 @@ func RegisterSchema(major int) { LatestMajor = major } } + +type Config struct { + SchemaName string // name of the postgresql schema in which any database objects should be created +} diff --git a/storage/catalog.go b/storage/catalog.go index 4b51086f5..27420fb69 100644 --- a/storage/catalog.go +++ b/storage/catalog.go @@ -34,7 +34,7 @@ func NewCatalog(cfg config.StorageConf) (*Catalog, error) { dburl = sc.URL } - db, err := NewDatabase(context.TODO(), dburl, sc.PoolSize, sc.ApplicationName, sc.AllowUpsert) + db, err := NewDatabase(context.TODO(), dburl, 
sc.PoolSize, sc.ApplicationName, sc.SchemaName, sc.AllowUpsert) if err != nil { return nil, fmt.Errorf("failed to create postgresql storage %q: %w", name, err) }
diff --git a/storage/migrate.go b/storage/migrate.go index afa09dc45..ef2223b40 100644 --- a/storage/migrate.go +++ b/storage/migrate.go @@ -3,7 +3,10 @@ package storage import ( "context" "fmt" + "reflect" "strconv" + "strings" + "text/template" "github.com/go-pg/migrations/v8" "github.com/go-pg/pg/v10" @@ -12,14 +15,18 @@ import ( "github.com/filecoin-project/sentinel-visor/model" "github.com/filecoin-project/sentinel-visor/schemas" v0 "github.com/filecoin-project/sentinel-visor/schemas/v0" + v1 "github.com/filecoin-project/sentinel-visor/schemas/v1" )
// GetSchemaVersions returns the schema version in the database and the latest schema version defined by the available // migrations. func (d *Database) GetSchemaVersions(ctx context.Context) (model.Version, model.Version, error) { + latest := LatestSchemaVersion() + // If we're already connected then use that connection - if d.DB != nil { - return getSchemaVersions(ctx, d.DB) + if d.db != nil { + dbVersion, _, err := getDatabaseSchemaVersion(ctx, d.db, d.schemaName) + return dbVersion, latest, err } // Temporarily connect @@ -28,38 +35,54 @@ func (d *Database) GetSchemaVersions(ctx context.Context) (model.Version, model. return model.Version{}, model.Version{}, xerrors.Errorf("connect: %w", err) } defer db.Close() // nolint: errcheck - return getSchemaVersions(ctx, db) + dbVersion, _, err := getDatabaseSchemaVersion(ctx, db, "public") + return dbVersion, latest, err }
-// getSchemaVersions returns the schema version in the database and the schema version defined by the available -// migrations. -func getSchemaVersions(ctx context.Context, db *pg.DB) (model.Version, model.Version, error) { - // Ensure the visor_version table exists - _, err := db.Exec(` - CREATE TABLE IF NOT EXISTS public.visor_version ( - "major" int NOT NULL, - PRIMARY KEY ("major") - ) - `)
+// getDatabaseSchemaVersion returns the schema version in use by the database and whether the schema versioning +// tables have been initialized. If no schema version tables can be found then the database is assumed to be +// uninitialized and a zero version and false value will be returned. The returned boolean will only be true +// if the schema versioning tables exist and are populated correctly. +func getDatabaseSchemaVersion(ctx context.Context, db *pg.DB, schemaName string) (model.Version, bool, error) { + vvExists, err := tableExists(ctx, db, schemaName, "visor_version") + if err != nil { + return model.Version{}, false, xerrors.Errorf("checking if visor_version exists: %w", err) + } + + migExists, err := tableExists(ctx, db, schemaName, "gopg_migrations") if err != nil { - return model.Version{}, model.Version{}, xerrors.Errorf("ensure visor_version exists :%w", err) + return model.Version{}, false, xerrors.Errorf("checking if gopg_migrations exists: %w", err) + } + + if !migExists && !vvExists { + // Uninitialized database + return model.Version{}, false, nil } + // Read the major schema version from the visor_version table + vvTableName := schemaName + ".visor_version" var major int - _, err = db.QueryOne(pg.Scan(&major), `SELECT major FROM visor_version LIMIT 1`) + _, err = db.QueryOne(pg.Scan(&major), `SELECT major FROM ? LIMIT 1`, pg.SafeQuery(vvTableName)) if err != nil && err != pg.ErrNoRows { - return model.Version{}, model.Version{}, err + return model.Version{}, false, err } - // Run the migration init to ensure we always have a migrations table - _, _, err = migrations.Run(db, "init") + coll, err := collectionForVersion(model.Version{ + Major: major, + }) if err != nil { - return model.Version{}, model.Version{}, xerrors.Errorf("migration table init: %w", err) + return model.Version{}, false, err } + coll.SetTableName(schemaName + ".gopg_migrations") - migration, err := migrations.Version(db) + migration, err := coll.Version(db) if err != nil { - return model.Version{}, model.Version{}, xerrors.Errorf("unable to determine schema version: %w", err) + return model.Version{}, false, xerrors.Errorf("unable to determine schema version: %w", err) + } + + if major == 0 && migration == 0 { + // Database has the version tables but they are unpopulated so database is not initialized + return model.Version{}, false, nil } dbVersion := model.Version{ @@ -67,14 +90,82 @@ func getSchemaVersions(ctx context.Context, db *pg.DB) (model.Version, model.Ver Patch: int(migration), } - return dbVersion, LatestSchemaVersion(), nil + return dbVersion, true, nil +} +
+// initDatabaseSchema initializes the version tables for tracking the schema version installed in the database +func initDatabaseSchema(ctx context.Context, db *pg.DB, schemaName string) error { + if schemaName != "public" { + _, err := db.Exec(`CREATE SCHEMA IF NOT EXISTS ?`, pg.SafeQuery(schemaName)) + if err != nil { + return xerrors.Errorf("ensure schema exists: %w", err) + } + } + + // Ensure the visor_version table exists + vvTableName := schemaName + ".visor_version" + _, err := db.Exec(` + CREATE TABLE IF NOT EXISTS ? ( + "major" int NOT NULL, + PRIMARY KEY ("major") + ) + `, pg.SafeQuery(vvTableName)) + if err != nil { + return xerrors.Errorf("ensure visor_version exists: %w", err) + } + + // Ensure the gopg migrations table exists + migTableName := schemaName + ".gopg_migrations" + _, err = db.Exec(` + CREATE TABLE IF NOT EXISTS ? ( + id serial, + version bigint, + created_at timestamptz + ) + `, pg.SafeQuery(migTableName)) + if err != nil { + return xerrors.Errorf("ensure gopg_migrations exists: %w", err) + } + + return nil +} +
+func validateDatabaseSchemaVersion(ctx context.Context, db *pg.DB, schemaName string) (model.Version, error) { + // Check if the version of the schema is compatible + dbVersion, initialized, err := getDatabaseSchemaVersion(ctx, db, schemaName) + if err != nil { + return model.Version{}, xerrors.Errorf("get schema version: %w", err) + } + + if !initialized { + return model.Version{}, xerrors.Errorf("schema not installed in database") + } + + latestVersion := LatestSchemaVersion() + switch { + case latestVersion.Before(dbVersion): + // porridge too hot + return model.Version{}, ErrSchemaTooNew + case dbVersion.Before(model.OldestSupportedSchemaVersion): + // porridge too cold + return model.Version{}, ErrSchemaTooOld + default: + // just right + return dbVersion, nil + } }
// LatestSchemaVersion returns the most recent version of the model schema. It is based on the highest migration version // in the highest major schema version func LatestSchemaVersion() model.Version { + return latestSchemaVersionForMajor(schemas.LatestMajor) +} + +// latestSchemaVersionForMajor returns the most recent version of the model schema for a given major version. 
It is +// based on the highest migration version +func latestSchemaVersionForMajor(major int) model.Version { version := model.Version{ - Major: schemas.LatestMajor, + Major: major, } coll, err := collectionForVersion(version) @@ -105,24 +196,30 @@ func (d *Database) MigrateSchema(ctx context.Context) error { // MigrateSchema migrates the database schema to a specific version. Note that downgrading a schema to an earlier // version is destructive and may result in the loss of data. func (d *Database) MigrateSchemaTo(ctx context.Context, target model.Version) error { + if target.Major == 0 && d.schemaName != "public" { + return xerrors.Errorf("v0 schema must use the public postgresql schema") + } + db, err := connect(ctx, d.opt) if err != nil { return xerrors.Errorf("connect: %w", err) } defer db.Close() // nolint: errcheck - dbVersion, latestVersion, err := getSchemaVersions(ctx, db) + dbVersion, initialized, err := getDatabaseSchemaVersion(ctx, db, d.schemaName) if err != nil { return xerrors.Errorf("get schema versions: %w", err) } log.Infof("current database schema is version %s", dbVersion) - if target.Major != dbVersion.Major { + // Check that we are not trying to migrate to a different major version of an already installed schema + if initialized && target.Major != dbVersion.Major { return xerrors.Errorf("cannot migrate to a different major schema version. database version=%s, target version=%s", dbVersion, target) } + latestVersion := latestSchemaVersionForMajor(target.Major) if latestVersion.Patch < target.Patch { - return xerrors.Errorf("no migrations found for version %d", target) + return xerrors.Errorf("no migrations found for version %s", target) } if dbVersion == target { @@ -133,6 +230,7 @@ func (d *Database) MigrateSchemaTo(ctx context.Context, target model.Version) er if err != nil { return xerrors.Errorf("no schema definition corresponds to version %s: %w", target, err) } + coll.SetTableName(d.schemaName + ".gopg_migrations") if err := checkMigrationSequence(ctx, coll, dbVersion.Patch, target.Patch); err != nil { return xerrors.Errorf("check migration sequence: %w", err) @@ -143,18 +241,28 @@ func (d *Database) MigrateSchemaTo(ctx context.Context, target model.Version) er return xerrors.Errorf("acquiring schema lock: %w", err) } + if err := initDatabaseSchema(ctx, db, d.schemaName); err != nil { + return xerrors.Errorf("initializing schema version tables: %w", err) + } + // Check if we need to create the base schema if dbVersion.Patch == 0 { - log.Infof("creating base schema for major version %d", dbVersion.Major) + log.Infof("creating base schema for major version %d", target.Major) + + cfg := schemas.Config{ + SchemaName: d.schemaName, + } - base, err := baseForVersion(dbVersion) + base, err := baseForVersion(target, cfg) if err != nil { - return xerrors.Errorf("no base schema defined for version %s: %w", dbVersion, err) + return xerrors.Errorf("no base schema defined for version %s: %w", target, err) } if _, err := db.Exec(base); err != nil { return xerrors.Errorf("creating base schema: %w", err) } + + dbVersion.Major = target.Major } // Remember to release the lock @@ -203,11 +311,20 @@ func checkMigrationSequence(ctx context.Context, coll *migrations.Collection, fr versions[m.Version] = true } + if from == to { + return nil + } + if from > to { to, from = from, to } for i := from; i <= to; i++ { + // Migration 0 is always a no-op since it's the base schema + if i == 0 { + continue + } + if !versions[int64(i)] { return xerrors.Errorf("missing migration for schema 
version %d", i) } @@ -220,16 +337,63 @@ func collectionForVersion(version model.Version) (*migrations.Collection, error) switch version.Major { case 0: return v0.Patches, nil + case 1: + return v1.Patches, nil default: return nil, xerrors.Errorf("unsupported major version: %d", version.Major) } } -func baseForVersion(version model.Version) (string, error) { +func baseForVersion(version model.Version, cfg schemas.Config) (string, error) { switch version.Major { case 0: return v0.Base, nil + case 1: + tmpl, err := template.New("base").Funcs(schemaTemplateFuncMap).Parse(v1.BaseTemplate) + if err != nil { + return "", xerrors.Errorf("parse base template: %w", err) + } + var buf strings.Builder + if err := tmpl.Execute(&buf, cfg); err != nil { + return "", xerrors.Errorf("execute base template: %w", err) + } + return buf.String(), nil default: return "", xerrors.Errorf("unsupported major version: %d", version.Major) } } + +func isEmpty(val interface{}) bool { + v := reflect.ValueOf(val) + if !v.IsValid() { + return true + } + + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Complex64, reflect.Complex128: + return v.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Struct: + return false + default: + return v.IsNil() + } +} + +var schemaTemplateFuncMap = template.FuncMap{ + "default": func(def interface{}, value interface{}) interface{} { + if isEmpty(value) { + return def + } + return value + }, +} diff --git a/storage/sql.go b/storage/sql.go index d39574dd8..69c9da6f8 100644 --- a/storage/sql.go +++ b/storage/sql.go @@ -84,7 +84,7 @@ var ( const MaxPostgresNameLength = 64 -func NewDatabase(ctx context.Context, url string, poolSize int, name string, upsert bool) (*Database, error) { +func NewDatabase(ctx context.Context, url string, poolSize int, name string, schemaName string, upsert bool) (*Database, error) { if len(name) > MaxPostgresNameLength { return nil, ErrNameTooLong } @@ -98,21 +98,58 @@ func NewDatabase(ctx context.Context, url string, poolSize int, name string, ups opt.ApplicationName = name } + onConnect := func(ctx context.Context, conn *pg.Conn) error { + _, err := conn.Exec("set search_path=?", schemaName) + if err != nil { + log.Errorf("failed to set postgresql search_path: %v", err) + } + return nil + } + + if opt.OnConnect == nil { + opt.OnConnect = onConnect + } else { + // Chain functions + prevOnConnect := opt.OnConnect + opt.OnConnect = func(ctx context.Context, conn *pg.Conn) error { + if err := prevOnConnect(ctx, conn); err != nil { + return err + } + return onConnect(ctx, conn) + } + } + return &Database{ - opt: opt, - Clock: clock.New(), - Upsert: upsert, + opt: opt, + schemaName: schemaName, + Clock: clock.New(), + Upsert: upsert, + }, nil +} + +func NewDatabaseFromDB(ctx context.Context, db *pg.DB, schemaName string) (*Database, error) { + dbVersion, err := validateDatabaseSchemaVersion(ctx, db, schemaName) + if err != nil { + return nil, err + } + + return &Database{ + db: db, + opt: new(pg.Options), + Clock: clock.New(), + version: dbVersion, }, nil } var _ Connector = (*Database)(nil) type Database struct { - DB *pg.DB - opt *pg.Options - Clock clock.Clock - Upsert bool - version 
model.Version // schema version identified in the database + db *pg.DB + opt *pg.Options + schemaName string + Clock clock.Clock + Upsert bool + version model.Version // schema version identified in the database } // Connect opens a connection to the database and checks that the schema is compatible with the version required @@ -124,28 +161,16 @@ func (d *Database) Connect(ctx context.Context) error { return xerrors.Errorf("connect: %w", err) } - // Check if the version of the schema is compatible - dbVersion, latestVersion, err := getSchemaVersions(ctx, db) + dbVersion, err := validateDatabaseSchemaVersion(ctx, db, d.schemaName) if err != nil { _ = db.Close() // nolint: errcheck - return xerrors.Errorf("get schema versions: %w", err) + return err } - switch { - case latestVersion.Before(dbVersion): - // porridge too hot - _ = db.Close() // nolint: errcheck - return ErrSchemaTooNew - case dbVersion.Before(model.OldestSupportedSchemaVersion): - // porridge too cold - _ = db.Close() // nolint: errcheck - return ErrSchemaTooOld - default: - // just right - d.DB = db - d.version = dbVersion - return nil - } + d.db = db + d.version = dbVersion + + return nil } func connect(ctx context.Context, opt *pg.Options) (*pg.DB, error) { @@ -168,11 +193,11 @@ func connect(ctx context.Context, opt *pg.Options) (*pg.DB, error) { } func (d *Database) IsConnected(ctx context.Context) bool { - if d.DB == nil { + if d.db == nil { return false } - if err := d.DB.Ping(ctx); err != nil { + if err := d.db.Ping(ctx); err != nil { return false } @@ -181,12 +206,12 @@ func (d *Database) IsConnected(ctx context.Context) bool { func (d *Database) Close(ctx context.Context) error { // Advisory locks are automatically closed at end of session but its still good practice to close explicitly - if err := SchemaLock.UnlockShared(ctx, d.DB); err != nil && !errors.Is(err, context.Canceled) { + if err := SchemaLock.UnlockShared(ctx, d.db); err != nil && !errors.Is(err, context.Canceled) { log.Errorf("failed to release schema lock: %v", err) } - err := d.DB.Close() - d.DB = nil + err := d.db.Close() + d.db = nil return err } @@ -194,8 +219,8 @@ func (d *Database) Close(ctx context.Context) error { // and returns an error if they are incompatible func (d *Database) VerifyCurrentSchema(ctx context.Context) error { // If we're already connected then use that connection - if d.DB != nil { - return verifyCurrentSchema(ctx, d.DB) + if d.db != nil { + return verifyCurrentSchema(ctx, d.db, d.schemaName) } // Temporarily connect @@ -204,16 +229,37 @@ func (d *Database) VerifyCurrentSchema(ctx context.Context) error { return xerrors.Errorf("connect: %w", err) } defer db.Close() // nolint: errcheck - return verifyCurrentSchema(ctx, db) + return verifyCurrentSchema(ctx, db, "public") } -func verifyCurrentSchema(ctx context.Context, db *pg.DB) error { +func verifyCurrentSchema(ctx context.Context, db *pg.DB, schemaName string) error { + type versionable interface { + AsVersion(model.Version) (interface{}, bool) + } + + version, initialized, err := getDatabaseSchemaVersion(ctx, db, schemaName) + if err != nil { + return xerrors.Errorf("get schema version: %w", err) + } + + if !initialized { + return xerrors.Errorf("schema not installed in database") + } + valid := true for _, model := range models { + if vm, ok := model.(versionable); ok { + m, ok := vm.AsVersion(version) + if !ok { + return xerrors.Errorf("model %T does not support version %s", model, version) + } + model = m + } + q := db.Model(model) tm := q.TableModel() m := tm.Table() - 
err := verifyModel(ctx, db, m) + err := verifyModel(ctx, db, schemaName, m) if err != nil { valid = false log.Errorf("verify schema: %v", err) @@ -226,11 +272,10 @@ func verifyCurrentSchema(ctx context.Context, db *pg.DB) error { return nil } -func verifyModel(ctx context.Context, db *pg.DB, m *orm.Table) error { +func verifyModel(ctx context.Context, db *pg.DB, schemaName string, m *orm.Table) error { tableName := stripQuotes(m.SQLNameForSelects) - var exists bool - _, err := db.QueryOneContext(ctx, pg.Scan(&exists), `SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema='public' AND table_name=?)`, tableName) + exists, err := tableExists(ctx, db, schemaName, tableName) if err != nil { return xerrors.Errorf("querying table: %v", err) } @@ -240,7 +285,7 @@ func verifyModel(ctx context.Context, db *pg.DB, m *orm.Table) error { for _, fld := range m.Fields { var datatype string - _, err := db.QueryOne(pg.Scan(&datatype), `SELECT data_type FROM information_schema.columns WHERE table_schema='public' AND table_name=? AND column_name=?`, tableName, fld.SQLName) + _, err := db.QueryOne(pg.Scan(&datatype), `SELECT data_type FROM information_schema.columns WHERE table_schema=? AND table_name=? AND column_name=?`, schemaName, tableName, fld.SQLName) if err != nil { if errors.Is(err, pg.ErrNoRows) { return xerrors.Errorf("required column %s.%s not found", tableName, fld.SQLName) @@ -248,7 +293,7 @@ func verifyModel(ctx context.Context, db *pg.DB, m *orm.Table) error { return xerrors.Errorf("querying field: %v %T", err, err) } if datatype == "USER-DEFINED" { - _, err := db.QueryOne(pg.Scan(&datatype), `SELECT udt_name FROM information_schema.columns WHERE table_schema='public' AND table_name=? AND column_name=?`, tableName, fld.SQLName) + _, err := db.QueryOne(pg.Scan(&datatype), `SELECT udt_name FROM information_schema.columns WHERE table_schema=? AND table_name=? AND column_name=?`, schemaName, tableName, fld.SQLName) if err != nil { if errors.Is(err, pg.ErrNoRows) { return xerrors.Errorf("required column %s.%s not found", tableName, fld.SQLName) @@ -273,13 +318,23 @@ func verifyModel(ctx context.Context, db *pg.DB, m *orm.Table) error { return nil } +func tableExists(ctx context.Context, db *pg.DB, schemaName string, tableName string) (bool, error) { + var exists bool + _, err := db.QueryOneContext(ctx, pg.Scan(&exists), `SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_schema=? AND table_name=?)`, schemaName, tableName) + if err != nil { + return false, xerrors.Errorf("querying table: %v", err) + } + + return exists, nil +} + func stripQuotes(s types.Safe) string { return strings.Trim(string(s), `"`) } // PersistBatch persists a batch of persistables in a single transaction func (d *Database) PersistBatch(ctx context.Context, ps ...model.Persistable) error { - return d.DB.RunInTransaction(ctx, func(tx *pg.Tx) error { + return d.db.RunInTransaction(ctx, func(tx *pg.Tx) error { txs := &TxStorage{ tx: tx, upsert: d.Upsert, @@ -295,6 +350,10 @@ func (d *Database) PersistBatch(ctx context.Context, ps ...model.Persistable) er }) } +func (d *Database) ExecContext(c context.Context, query interface{}, params ...interface{}) (pg.Result, error) { + return d.db.ExecContext(c, query, params...) 
+} + type TxStorage struct { tx *pg.Tx upsert bool diff --git a/storage/sql_test.go b/storage/sql_test.go index 55ee90832..92444ae0f 100644 --- a/storage/sql_test.go +++ b/storage/sql_test.go @@ -25,7 +25,7 @@ func TestConsistentSchemaMigrationSequence(t *testing.T) { coll, err := collectionForVersion(latestVersion) require.NoError(t, err) - err = checkMigrationSequence(context.Background(), coll, 1, latestVersion.Patch) + err = checkMigrationSequence(context.Background(), coll, 0, latestVersion.Patch) require.NoError(t, err) } @@ -45,7 +45,7 @@ func TestSchemaIsCurrent(t *testing.T) { model := m t.Run(fmt.Sprintf("%T", model), func(t *testing.T) { q := db.Model(model) - err := verifyModel(ctx, db, q.TableModel().Table()) + err := verifyModel(ctx, db, "public", q.TableModel().Table()) if err != nil { t.Errorf("%v", err) ctq := orm.NewCreateTableQuery(q, &orm.CreateTableOptions{IfNotExists: true}) @@ -72,7 +72,7 @@ func TestModelUpsert(t *testing.T) { // database disallowing upserting d := &Database{ - DB: db, + db: db, Clock: testutil.NewMockClock(), Upsert: false, } @@ -124,11 +124,11 @@ func TestModelUpsert(t *testing.T) { func TestLongNames(t *testing.T) { justLongEnough := strings.Repeat("x", MaxPostgresNameLength) - _, err := NewDatabase(context.Background(), "postgres://example.com/fakedb", 1, justLongEnough, false) + _, err := NewDatabase(context.Background(), "postgres://example.com/fakedb", 1, justLongEnough, "public", false) require.NoError(t, err) tooLong := strings.Repeat("x", MaxPostgresNameLength+1) - _, err = NewDatabase(context.Background(), "postgres://example.com/fakedb", 1, tooLong, false) + _, err = NewDatabase(context.Background(), "postgres://example.com/fakedb", 1, tooLong, "public", false) require.Error(t, err) } @@ -206,7 +206,7 @@ func TestDatabasePersistWithVersion(t *testing.T) { require.NoError(t, err, "creating versioned_model") d := &Database{ - DB: db, + db: db, Clock: testutil.NewMockClock(), version: version, } @@ -269,7 +269,7 @@ func TestDatabaseUpsertWithVersion(t *testing.T) { require.NoError(t, err, "creating versioned_model") d := &Database{ - DB: db, + db: db, Clock: testutil.NewMockClock(), Upsert: true, version: version, @@ -339,7 +339,7 @@ func TestDatabasePersistWithUnsupportedVersion(t *testing.T) { } d := &Database{ - DB: db, + db: db, Clock: testutil.NewMockClock(), version: model.Version{Major: 1}, // model did not exist in this version } diff --git a/tasks/actorstate/actorstate.go b/tasks/actorstate/actorstate.go index c507b3ee1..48c514910 100644 --- a/tasks/actorstate/actorstate.go +++ b/tasks/actorstate/actorstate.go @@ -2,6 +2,7 @@ package actorstate import ( "context" + "strings" "sync" "github.com/filecoin-project/go-address" @@ -45,7 +46,7 @@ type ActorStateAPI interface { StateReadState(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.ActorState, error) // TODO(remove): StateMinerSectors loads the actor and then calls miner.Load which StorageMinerExtractor already has available - //StateMinerSectors(ctx context.Context, addr address.Address, bf *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) + // StateMinerSectors(ctx context.Context, addr address.Address, bf *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) Store() adt.Store } @@ -91,3 +92,19 @@ func ActorNameByCode(code cid.Cid) string { } return builtin4.ActorNameByCode(code) } + +func ActorFamily(name string) string { + if name == "" { + return "" + } + + if !strings.HasPrefix(name, "fil/") { + return "" 
+ } + idx := strings.LastIndex(name, "/") + if idx == -1 { + return "" + } + + return name[idx+1:] +} diff --git a/tasks/actorstate/genesis_test.go b/tasks/actorstate/genesis_test.go index e78373705..883bc1239 100644 --- a/tasks/actorstate/genesis_test.go +++ b/tasks/actorstate/genesis_test.go @@ -61,7 +61,9 @@ func TestGenesisProcessor(t *testing.T) { apitest.MineUntilBlock(ctx, t, nodes[0], sn[0], nil) t.Logf("initializing genesis processor") - d := &storage.Database{DB: db} + d, err := storage.NewDatabaseFromDB(ctx, db, "public") + require.NoError(t, err, "new database") + p := NewGenesisProcessor(d, openedAPI) t.Logf("processing") diff --git a/tasks/actorstate/init.go b/tasks/actorstate/init.go index a4ef04fc5..5b0f8ed67 100644 --- a/tasks/actorstate/init.go +++ b/tasks/actorstate/init.go @@ -44,6 +44,7 @@ func (InitExtractor) Extract(ctx context.Context, a ActorInfo, node ActorStateAP return err } out = append(out, &initmodel.IdAddress{ + Height: int64(a.Epoch), ID: idAddr.String(), Address: addr.String(), StateRoot: a.ParentStateRoot.String(), diff --git a/tasks/chaineconomics/economics.go b/tasks/chaineconomics/economics.go index 4d52e7df0..04ea9975a 100644 --- a/tasks/chaineconomics/economics.go +++ b/tasks/chaineconomics/economics.go @@ -38,11 +38,13 @@ func ExtractChainEconomicsModel(ctx context.Context, node ChainEconomicsLens, ts } return &chainmodel.ChainEconomics{ - ParentStateRoot: ts.ParentState().String(), - VestedFil: supply.FilVested.String(), - MinedFil: supply.FilMined.String(), - BurntFil: supply.FilBurnt.String(), - LockedFil: supply.FilLocked.String(), - CirculatingFil: supply.FilCirculating.String(), + Height: int64(ts.Height()), + ParentStateRoot: ts.ParentState().String(), + VestedFil: supply.FilVested.String(), + MinedFil: supply.FilMined.String(), + BurntFil: supply.FilBurnt.String(), + LockedFil: supply.FilLocked.String(), + CirculatingFil: supply.FilCirculating.String(), + FilReserveDisbursed: supply.FilReserveDisbursed.String(), }, nil } diff --git a/tasks/chaineconomics/economics_test.go b/tasks/chaineconomics/economics_test.go index 4c97e87da..b4f334556 100644 --- a/tasks/chaineconomics/economics_test.go +++ b/tasks/chaineconomics/economics_test.go @@ -27,11 +27,12 @@ func TestEconomicsModelExtraction(t *testing.T) { expectedTs := testutil.FakeTipset(t) expectedCircSupply := api.CirculatingSupply{ - FilVested: abi.NewTokenAmount(1), - FilMined: abi.NewTokenAmount(2), - FilBurnt: abi.NewTokenAmount(3), - FilLocked: abi.NewTokenAmount(4), - FilCirculating: abi.NewTokenAmount(5), + FilVested: abi.NewTokenAmount(1), + FilMined: abi.NewTokenAmount(2), + FilBurnt: abi.NewTokenAmount(3), + FilLocked: abi.NewTokenAmount(4), + FilCirculating: abi.NewTokenAmount(5), + FilReserveDisbursed: abi.NewTokenAmount(6), } mockedLens := new(MockedChainEconomicsLens) @@ -45,4 +46,5 @@ func TestEconomicsModelExtraction(t *testing.T) { assert.EqualValues(t, expectedCircSupply.FilVested.String(), em.VestedFil) assert.EqualValues(t, expectedCircSupply.FilLocked.String(), em.LockedFil) assert.EqualValues(t, expectedCircSupply.FilCirculating.String(), em.CirculatingFil) + assert.EqualValues(t, expectedCircSupply.FilReserveDisbursed.String(), em.FilReserveDisbursed) } diff --git a/tasks/messages/message.go b/tasks/messages/message.go index ea131ba15..f6207c30b 100644 --- a/tasks/messages/message.go +++ b/tasks/messages/message.go @@ -191,6 +191,7 @@ func (p *Task) ProcessMessages(ctx context.Context, ts *types.TipSet, pts *types } receiptResults = append(receiptResults, rcpt) + 
actorName := actorstate.ActorNameByCode(m.ToActorCode) gasOutput := &derivedmodel.GasOutputs{ Height: int64(m.Height), Cid: m.Cid.String(), @@ -214,7 +215,8 @@ func (p *Task) ProcessMessages(ctx context.Context, ts *types.TipSet, pts *types Refund: m.GasOutputs.Refund.String(), GasRefund: m.GasOutputs.GasRefund, GasBurned: m.GasOutputs.GasBurned, - ActorName: actorstate.ActorNameByCode(m.ToActorCode), + ActorName: actorName, + ActorFamily: actorstate.ActorFamily(actorName), } gasOutputsResults = append(gasOutputsResults, gasOutput) diff --git a/tasks/views/chainvis.go b/tasks/views/chainvis.go index 82d3c0082..b4e8bebf3 100644 --- a/tasks/views/chainvis.go +++ b/tasks/views/chainvis.go @@ -43,7 +43,7 @@ func (r *ChainVisRefresher) Run(ctx context.Context) error { func (r *ChainVisRefresher) refreshView(ctx context.Context) (bool, error) { for _, v := range chainVisViews { - _, err := r.db.DB.ExecContext(ctx, fmt.Sprintf("REFRESH MATERIALIZED VIEW %s;", v)) + _, err := r.db.ExecContext(ctx, fmt.Sprintf("REFRESH MATERIALIZED VIEW %s;", v)) if err != nil { return true, xerrors.Errorf("refresh %s: %w", v, err) }