diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 4479c1cfc8b..039ea96388b 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -20,7 +20,7 @@ write a little note why. - [ ] Targeted PR against correct branch (see [CONTRIBUTING.md](https://github.com/cosmos/ibc-go/blob/master/CONTRIBUTING.md#pr-targeting)) - [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work. -- [ ] Code follows the [module structure standards](https://github.com/cosmos/cosmos-sdk/blob/main/docs/docs/building-modules/10-structure.md). +- [ ] Code follows the [module structure standards](https://github.com/cosmos/cosmos-sdk/blob/main/docs/docs/building-modules/11-structure.md). - [ ] Wrote unit and integration [tests](https://github.com/cosmos/ibc-go/blob/master/CONTRIBUTING.md#testing) - [ ] Updated relevant documentation (`docs/`) or specification (`x//spec/`) - [ ] Added relevant `godoc` [comments](https://blog.golang.org/godoc-documenting-go-code). diff --git a/README.md b/README.md index f4a830c5149..74d030fa015 100644 --- a/README.md +++ b/README.md @@ -63,6 +63,8 @@ The Inter-Blockchain Communication protocol (IBC) allows blockchains to talk to 3.1 [ICS 07 Tendermint](https://github.com/cosmos/ibc-go/tree/main/modules/light-clients/07-tendermint) 3.2 [ICS 06 Solo Machine](https://github.com/cosmos/ibc-go/tree/main/modules/light-clients/06-solomachine) + + 3.3 [DIP 01 Dymint](https://github.com/dymensionxyz/ibc-go/tree/main-dym/modules/light-clients/01-dymint) Note: The localhost client is currently non-functional. diff --git a/docs/client/swagger-ui/swagger.yaml b/docs/client/swagger-ui/swagger.yaml index b11b1e21c36..bfdd160b1f0 100644 --- a/docs/client/swagger-ui/swagger.yaml +++ b/docs/client/swagger-ui/swagger.yaml @@ -5466,6 +5466,331 @@ paths: format: boolean tags: - Query + '/ibc/core/client/v1/consensus_states/{client_id}/heights': + get: + summary: >- + ConsensusStateHeights queries the height of every consensus states + associated with a given client. + operationId: ConsensusStateHeights + responses: + '200': + description: A successful response. + schema: + type: object + properties: + consensus_state_heights: + type: array + items: + type: object + properties: + revision_number: + type: string + format: uint64 + title: the revision that the client is currently on + revision_height: + type: string + format: uint64 + title: the height within the given revision + description: >- + Normally the RevisionHeight is incremented at each height + while keeping + + RevisionNumber the same. However some consensus algorithms + may choose to + + reset the height in certain conditions e.g. 
hard forks, + state-machine + + breaking changes In these cases, the RevisionNumber is + incremented so that + + height continues to be monitonically increasing even as the + RevisionHeight + + gets reset + title: >- + Height is a monotonically increasing data type + + that can be compared against another Height for the purposes + of updating and + + freezing clients + title: consensus state heights + pagination: + title: pagination response + type: object + properties: + next_key: + type: string + format: byte + title: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: |- + QueryConsensusStateHeightsResponse is the response type for the + Query/ConsensusStateHeights RPC method + default: + description: An unexpected error response + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. 
+ + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := ptypes.MarshalAny(foo) + ... + foo := &pb.Foo{} + if err := ptypes.UnmarshalAny(any, foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + ==== + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: client_id + description: client identifier + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + format: boolean + tags: + - Query '/ibc/core/client/v1/consensus_states/{client_id}/revision/{revision_number}/height/{revision_height}': get: summary: >- diff --git a/docs/ibc/integration.md b/docs/ibc/integration.md index fba5798eadd..c0bc48c33a3 100644 --- a/docs/ibc/integration.md +++ b/docs/ibc/integration.md @@ -139,7 +139,7 @@ func NewApp(...args) *App { ### Module Managers -In order to use IBC, we need to add the new modules to the module `Manager` and to the `SimulationManager` in case your application supports [simulations](https://github.com/cosmos/cosmos-sdk/blob/main/docs/docs/building-modules/13-simulator.md). 
+In order to use IBC, we need to add the new modules to the module `Manager` and to the `SimulationManager` in case your application supports [simulations](https://github.com/cosmos/cosmos-sdk/blob/main/docs/docs/building-modules/14-simulator.md).
 
 ```go
 // app.go
@@ -178,7 +178,7 @@ connection handhake.
 The IBC module also has
 [`BeginBlock`](https://github.com/cosmos/ibc-go/blob/main/modules/core/02-client/abci.go) logic as
 well. This is optional as it is only required if your application uses the [localhost
-client](https://github.com/cosmos/ibc/blob/master/spec/client/ics-009-loopback-client) to connect two
+client](https://github.com/cosmos/ibc/tree/main/spec/client/ics-009-loopback-client) to connect two
 different modules from the same chain.
 
 ::: tip
diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md
index c7f1b1e9652..038c7c5bf0c 100644
--- a/docs/ibc/proto-docs.md
+++ b/docs/ibc/proto-docs.md
@@ -297,6 +297,13 @@
 - [ibc/core/types/v1/genesis.proto](#ibc/core/types/v1/genesis.proto)
   - [GenesisState](#ibc.core.types.v1.GenesisState)
 
+- [ibc/lightclients/dymint/dymint.proto](#ibc/lightclients/dymint/dymint.proto)
+  - [ClientState](#ibc.lightclients.dymint.ClientState)
+  - [ConsensusState](#ibc.lightclients.dymint.ConsensusState)
+  - [Fraction](#ibc.lightclients.dymint.Fraction)
+  - [Header](#ibc.lightclients.dymint.Header)
+  - [Misbehaviour](#ibc.lightclients.dymint.Misbehaviour)
+
 - [ibc/lightclients/localhost/v1/localhost.proto](#ibc/lightclients/localhost/v1/localhost.proto)
   - [ClientState](#ibc.lightclients.localhost.v1.ClientState)
 
@@ -4357,6 +4364,127 @@ GenesisState defines the ibc module's genesis state.
+
+
+
+
+
+
+
+
+
+
+

+<p align="right"><a href="#top">Top</a></p>

+ +## ibc/lightclients/dymint/dymint.proto + + + + + +### ClientState +ClientState from Dymint tracks the current validator set, latest height, +and a possible frozen height. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `chain_id` | [string](#string) | | | +| `trust_level` | [Fraction](#ibc.lightclients.dymint.Fraction) | | | +| `trusting_period` | [google.protobuf.Duration](#google.protobuf.Duration) | | duration of the period since the LastestTimestamp during which the submitted headers are valid for upgrade | +| `unbonding_period` | [google.protobuf.Duration](#google.protobuf.Duration) | | duration of the staking unbonding period | +| `max_clock_drift` | [google.protobuf.Duration](#google.protobuf.Duration) | | defines how much new (untrusted) header's Time can drift into the future. | +| `frozen_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | Block height when the client was frozen due to a misbehaviour | +| `latest_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | Latest height the client was updated to | +| `proof_specs` | [ics23.ProofSpec](#ics23.ProofSpec) | repeated | Proof specifications used in verifying counterparty state | +| `upgrade_path` | [string](#string) | repeated | Path at which next upgraded client will be committed. Each element corresponds to the key for a single CommitmentProof in the chained proof. NOTE: ClientState must stored under `{upgradePath}/{upgradeHeight}/clientState` ConsensusState must be stored under `{upgradepath}/{upgradeHeight}/consensusState` For SDK chains using the default upgrade module, upgrade_path should be []string{"upgrade", "upgradedIBCState"}` | + + + + + + + + +### ConsensusState +ConsensusState defines the consensus state from Dymint. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `timestamp` | [google.protobuf.Timestamp](#google.protobuf.Timestamp) | | timestamp that corresponds to the block height in which the ConsensusState was stored. | +| `root` | [ibc.core.commitment.v1.MerkleRoot](#ibc.core.commitment.v1.MerkleRoot) | | commitment root (i.e app hash) | +| `next_validators_hash` | [bytes](#bytes) | | | + + + + + + + + +### Fraction +Fraction defines the protobuf message type for tmmath.Fraction that only +supports positive values. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `numerator` | [uint64](#uint64) | | | +| `denominator` | [uint64](#uint64) | | | + + + + + + + + +### Header +Header defines the Dymint client consensus Header. +It encapsulates all the information necessary to update from a trusted +Dymint ConsensusState. The inclusion of TrustedHeight and +TrustedValidators allows this update to process correctly, so long as the +ConsensusState for the TrustedHeight exists, this removes race conditions +among relayers The SignedHeader and ValidatorSet are the new untrusted update +fields for the client. The TrustedHeight is the height of a stored +ConsensusState on the client that will be used to verify the new untrusted +header. The Trusted ConsensusState must be within the unbonding period of +current time in order to correctly verify, and the TrustedValidators must +hash to TrustedConsensusState.NextValidatorsHash since that is the last +trusted validator set at the TrustedHeight. 
+ + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `signed_header` | [tendermint.types.SignedHeader](#tendermint.types.SignedHeader) | | | +| `validator_set` | [tendermint.types.ValidatorSet](#tendermint.types.ValidatorSet) | | | +| `trusted_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | | +| `trusted_validators` | [tendermint.types.ValidatorSet](#tendermint.types.ValidatorSet) | | | + + + + + + + + +### Misbehaviour +Misbehaviour is a wrapper over two conflicting Headers +that implements Misbehaviour interface expected by ICS-02 + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| `client_id` | [string](#string) | | | +| `header_1` | [Header](#ibc.lightclients.dymint.Header) | | | +| `header_2` | [Header](#ibc.lightclients.dymint.Header) | | | + + + + + diff --git a/docs/migrations/support-denoms-with-slashes.md b/docs/migrations/support-denoms-with-slashes.md index 3bc3d1b6b83..0447cf57d99 100644 --- a/docs/migrations/support-denoms-with-slashes.md +++ b/docs/migrations/support-denoms-with-slashes.md @@ -33,7 +33,6 @@ app.UpgradeKeeper.SetUpgradeHandler("MigrateTraces", // transfer module consensus version has been bumped to 2 return app.mm.RunMigrations(ctx, app.configurator, fromVM) }) - ``` This is only necessary if there are denom traces in the store with incorrect trace information from previously received coins that had a slash in the base denom. However, it is recommended that any chain upgrading to support base denominations with slashes runs this code for safety. diff --git a/go.mod b/go.mod index 6f8a259b662..c947e86ddc0 100644 --- a/go.mod +++ b/go.mod @@ -158,5 +158,6 @@ replace ( // dragonberry replace for ics23 github.com/confio/ics23/go => github.com/cosmos/cosmos-sdk/ics23/go v0.8.0 + // protocol buffers replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 ) diff --git a/modules/core/02-client/abci.go b/modules/core/02-client/abci.go index 9fe681e2276..17d3566f9a4 100644 --- a/modules/core/02-client/abci.go +++ b/modules/core/02-client/abci.go @@ -9,7 +9,7 @@ import ( ) // BeginBlocker updates an existing localhost client with the latest block height. -func BeginBlocker(ctx sdk.Context, k keeper.Keeper) { +func BeginBlocker(ctx sdk.Context, k keeper.KeeperI) { plan, found := k.GetUpgradePlan(ctx) if found { // Once we are at the last block this chain will commit, set the upgraded consensus state diff --git a/modules/core/02-client/genesis.go b/modules/core/02-client/genesis.go index 602b13aba42..b0368766585 100644 --- a/modules/core/02-client/genesis.go +++ b/modules/core/02-client/genesis.go @@ -12,7 +12,7 @@ import ( // InitGenesis initializes the ibc client submodule's state from a provided genesis // state. -func InitGenesis(ctx sdk.Context, k keeper.Keeper, gs types.GenesisState) { +func InitGenesis(ctx sdk.Context, k keeper.KeeperI, gs types.GenesisState) { k.SetParams(ctx, gs.Params) // Set all client metadata first. This will allow client keeper to overwrite client and consensus state keys @@ -54,7 +54,7 @@ func InitGenesis(ctx sdk.Context, k keeper.Keeper, gs types.GenesisState) { // ExportGenesis returns the ibc client submodule's exported genesis. // NOTE: CreateLocalhost should always be false on export since a // created localhost will be included in the exported clients. 
-func ExportGenesis(ctx sdk.Context, k keeper.Keeper) types.GenesisState { +func ExportGenesis(ctx sdk.Context, k keeper.KeeperI) types.GenesisState { genClients := k.GetAllGenesisClients(ctx) clientsMetadata, err := k.GetAllClientMetadata(ctx, genClients) if err != nil { diff --git a/modules/core/02-client/keeper/keeper.go b/modules/core/02-client/keeper/keeper.go index 81ca1488ba7..e3540dec307 100644 --- a/modules/core/02-client/keeper/keeper.go +++ b/modules/core/02-client/keeper/keeper.go @@ -1,6 +1,7 @@ package keeper import ( + "context" "fmt" "reflect" "strings" @@ -24,6 +25,72 @@ import ( // Keeper represents a type that grants read and write permissions to any client // state information + +type KeeperIForTests interface { + CreateClient(ctx sdk.Context, clientState exported.ClientState, consensusState exported.ConsensusState) (string, error) + ClientStore(ctx sdk.Context, clientID string) sdk.KVStore + SetClientConsensusState(ctx sdk.Context, clientID string, height exported.Height, consensusState exported.ConsensusState) + // SetClientState(ctx types.Context, clientID string, clientState exported.ClientState) + GetLatestClientConsensusState(ctx sdk.Context, clientID string) (exported.ConsensusState, bool) +} +type KeeperI interface { + KeeperIForTests + + ValidateSelfClient(ctx sdk.Context, clientState exported.ClientState) error + UpgradedConsensusState(context.Context, *types.QueryUpgradedConsensusStateRequest) (*types.QueryUpgradedConsensusStateResponse, error) + GetSelfConsensusState(ctx sdk.Context, height exported.Height) (exported.ConsensusState, error) + + // ClientUnmarshaler interface + MustUnmarshalClientState([]byte) exported.ClientState + MustUnmarshalConsensusState([]byte) exported.ConsensusState + + // ClientState implements the IBC QueryServer interface + ClientState(c context.Context, req *types.QueryClientStateRequest) (*types.QueryClientStateResponse, error) + ClientStates(c context.Context, req *types.QueryClientStatesRequest) (*types.QueryClientStatesResponse, error) + ConsensusState(c context.Context, req *types.QueryConsensusStateRequest) (*types.QueryConsensusStateResponse, error) + ConsensusStates(c context.Context, req *types.QueryConsensusStatesRequest) (*types.QueryConsensusStatesResponse, error) + ConsensusStateHeights(c context.Context, req *types.QueryConsensusStateHeightsRequest) (*types.QueryConsensusStateHeightsResponse, error) + ClientStatus(c context.Context, req *types.QueryClientStatusRequest) (*types.QueryClientStatusResponse, error) + ClientParams(c context.Context, req *types.QueryClientParamsRequest) (*types.QueryClientParamsResponse, error) + UpgradedClientState(c context.Context, req *types.QueryUpgradedClientStateRequest) (*types.QueryUpgradedClientStateResponse, error) + + // GetClientConsensusState(ctx sdk.Context, clientID string) (connection exported.ConsensusState, found bool) + GetClientConsensusState(ctx sdk.Context, clientID string, height exported.Height) (exported.ConsensusState, bool) + + // From genesis.go + GetAllGenesisClients(ctx sdk.Context) types.IdentifiedClientStates + GetAllClientMetadata(ctx sdk.Context, genClients []types.IdentifiedClientState) ([]types.IdentifiedGenesisMetadata, error) + GetAllConsensusStates(ctx sdk.Context) types.ClientsConsensusStates + GetParams(ctx sdk.Context) types.Params + SetParams(ctx sdk.Context, params types.Params) + GetNextClientSequence(ctx sdk.Context) uint64 + SetAllClientMetadata(ctx sdk.Context, genMetadata []types.IdentifiedGenesisMetadata) + SetClientState(ctx sdk.Context, 
clientID string, clientState exported.ClientState) + SetClientConsensusState(ctx sdk.Context, clientID string, height exported.Height, consensusState exported.ConsensusState) + SetNextClientSequence(ctx sdk.Context, sequence uint64) + + // msg_server.go + CheckMisbehaviourAndUpdateState(ctx sdk.Context, misbehaviour exported.Misbehaviour) error + UpgradeClient(ctx sdk.Context, clientID string, upgradedClient exported.ClientState, upgradedConsState exported.ConsensusState, proofUpgradeClient []byte, proofUpgradeConsState []byte) error + UpdateClient(ctx sdk.Context, clientID string, header exported.Header) error + CreateClient(ctx sdk.Context, clientState exported.ClientState, consensusState exported.ConsensusState) (string, error) + + // abci.go + GetUpgradedClient(ctx sdk.Context, planHeight int64) ([]byte, bool) + GetUpgradePlan(ctx sdk.Context) (plan upgradetypes.Plan, havePlan bool) + SetUpgradedConsensusState(ctx sdk.Context, planHeight int64, bz []byte) error + GetClientState(ctx sdk.Context, clientID string) (exported.ClientState, bool) + // UpdateClient(ctx sdk.Context, clientID string, header exported.Header) error + MustMarshalConsensusState(consensusState exported.ConsensusState) []byte + + // proposal.go + ClientUpdateProposal(ctx sdk.Context, p *types.ClientUpdateProposal) error + HandleUpgradeProposal(ctx sdk.Context, p *types.UpgradeProposal) error + + // testing + MustMarshalClientState(clientState exported.ClientState) []byte +} + type Keeper struct { storeKey storetypes.StoreKey cdc codec.BinaryCodec diff --git a/modules/core/02-client/keeper/keeper_dymint.go b/modules/core/02-client/keeper/keeper_dymint.go new file mode 100644 index 00000000000..10029f29722 --- /dev/null +++ b/modules/core/02-client/keeper/keeper_dymint.go @@ -0,0 +1,71 @@ +package keeper + +import ( + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" + + "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" + ibcdtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" +) + +// Keeper represents a type that grants read and write permissions to any client +// state information +type DymintKeeper struct { + Keeper +} + +// NewKeeper creates a new NewKeeper instance +func NewDymintKeeper(cdc codec.BinaryCodec, key storetypes.StoreKey, paramSpace paramtypes.Subspace, sk types.StakingKeeper, uk types.UpgradeKeeper) DymintKeeper { + // set KeyTable if it has not already been set + if !paramSpace.HasKeyTable() { + paramSpace = paramSpace.WithKeyTable(types.ParamKeyTable()) + } + + return DymintKeeper{ + Keeper: NewKeeper(cdc, key, paramSpace, sk, uk), + } +} + +// GetSelfConsensusState introspects the (self) past historical info at a given height +// and returns the expected consensus state at that height. 
+// For now, can only retrieve self consensus states for the current revision +func (k DymintKeeper) GetSelfConsensusState(ctx sdk.Context, height exported.Height) (exported.ConsensusState, error) { + selfHeight, ok := height.(types.Height) + if !ok { + return nil, sdkerrors.Wrapf(sdkerrors.ErrInvalidType, "expected %T, got %T", types.Height{}, height) + } + // check that height revision matches chainID revision + revision := types.ParseChainID(ctx.ChainID()) + if revision != height.GetRevisionNumber() { + return nil, sdkerrors.Wrapf(types.ErrInvalidHeight, "chainID revision number does not match height revision number: expected %d, got %d", revision, height.GetRevisionNumber()) + } + histInfo, found := k.stakingKeeper.GetHistoricalInfo(ctx, int64(selfHeight.RevisionHeight)) + if !found { + return nil, sdkerrors.Wrapf(sdkerrors.ErrNotFound, "no historical info found at height %d", selfHeight.RevisionHeight) + } + + blockHeader, err := histInfo.Header.Marshal() + if err != nil { + return nil, err + } + + //FIXME: refactor this. should be methods of the keeper + sc := ibcdtypes.NewSelfClient() + return sc.GetSelfConsensusStateFromBlocHeader(k.cdc, blockHeader) +} + +// ValidateSelfClient validates the client parameters for a client of the running chain +// This function is only used to validate the client state the counterparty stores for this chain +// Client must be in same revision as the executing chain +func (k DymintKeeper) ValidateSelfClient(ctx sdk.Context, clientState exported.ClientState) error { + if exported.Dymint != clientState.ClientType() { + return sdkerrors.Wrapf(types.ErrInvalidClient, "invalid client type. expected: %s, got: %s", + exported.Dymint, clientState.ClientType()) + } + sc := ibcdtypes.NewSelfClient() + return sc.ValidateSelfClientState(ctx, k.stakingKeeper.UnbondingTime(ctx), clientState) +} diff --git a/modules/core/02-client/keeper/keeper_test.go b/modules/core/02-client/keeper/keeper_test.go index 19694c9fa91..719e6b5bc12 100644 --- a/modules/core/02-client/keeper/keeper_test.go +++ b/modules/core/02-client/keeper/keeper_test.go @@ -57,7 +57,7 @@ type KeeperTestSuite struct { cdc codec.Codec ctx sdk.Context - keeper *keeper.Keeper + keeper keeper.KeeperI consensusState *ibctmtypes.ConsensusState header *ibctmtypes.Header valSet *tmtypes.ValidatorSet @@ -86,7 +86,7 @@ func (suite *KeeperTestSuite) SetupTest() { suite.cdc = app.AppCodec() suite.ctx = app.BaseApp.NewContext(isCheckTx, tmproto.Header{Height: height, ChainID: testClientID, Time: now2}) - suite.keeper = &app.IBCKeeper.ClientKeeper + suite.keeper = app.IBCKeeper.ClientKeeper suite.privVal = ibctestingmock.NewPV() pubKey, err := suite.privVal.GetPubKey() diff --git a/modules/core/02-client/keeper/migrations.go b/modules/core/02-client/keeper/migrations.go index e4ba66760df..01e969ae462 100644 --- a/modules/core/02-client/keeper/migrations.go +++ b/modules/core/02-client/keeper/migrations.go @@ -1,6 +1,8 @@ package keeper import ( + "errors" + sdk "github.com/cosmos/cosmos-sdk/types" v100 "github.com/cosmos/ibc-go/v5/modules/core/02-client/legacy/v100" @@ -8,11 +10,11 @@ import ( // Migrator is a struct for handling in-place store migrations. type Migrator struct { - keeper Keeper + keeper KeeperI } // NewMigrator returns a new Migrator. 
-func NewMigrator(keeper Keeper) Migrator { +func NewMigrator(keeper KeeperI) Migrator { return Migrator{keeper: keeper} } @@ -23,5 +25,11 @@ func NewMigrator(keeper Keeper) Migrator { // - prunes expired tendermint consensus states // - adds iteration and processed height keys for unexpired tendermint consensus states func (m Migrator) Migrate1to2(ctx sdk.Context) error { - return v100.MigrateStore(ctx, m.keeper.storeKey, m.keeper.cdc) + //FIXME: support different kinds of clients + cl, ok := m.keeper.(Keeper) + if !ok { + return errors.New("client keeper is not a keeper.Keeper") + } + + return v100.MigrateStore(ctx, cl.storeKey, cl.cdc) } diff --git a/modules/core/02-client/proposal_handler.go b/modules/core/02-client/proposal_handler.go index 5064f860ca7..c34e08a9c43 100644 --- a/modules/core/02-client/proposal_handler.go +++ b/modules/core/02-client/proposal_handler.go @@ -10,7 +10,7 @@ import ( ) // NewClientProposalHandler defines the 02-client proposal handler -func NewClientProposalHandler(k keeper.Keeper) govtypes.Handler { +func NewClientProposalHandler(k keeper.KeeperI) govtypes.Handler { return func(ctx sdk.Context, content govtypes.Content) error { switch c := content.(type) { case *types.ClientUpdateProposal: diff --git a/modules/core/02-client/types/params.go b/modules/core/02-client/types/params.go index b45659c6376..bb9c97420f4 100644 --- a/modules/core/02-client/types/params.go +++ b/modules/core/02-client/types/params.go @@ -10,8 +10,8 @@ import ( ) var ( - // DefaultAllowedClients are "06-solomachine" and "07-tendermint" - DefaultAllowedClients = []string{exported.Solomachine, exported.Tendermint} + // DefaultAllowedClients are "01-dymint", "06-solomachine" and "07-tendermint" + DefaultAllowedClients = []string{exported.Dymint, exported.Solomachine, exported.Tendermint} // KeyAllowedClients is store's key for AllowedClients Params KeyAllowedClients = []byte("AllowedClients") diff --git a/modules/core/exported/client.go b/modules/core/exported/client.go index 4dce203bea4..cbe7e104bf0 100644 --- a/modules/core/exported/client.go +++ b/modules/core/exported/client.go @@ -1,6 +1,8 @@ package exported import ( + "time" + "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" proto "github.com/gogo/protobuf/proto" @@ -19,6 +21,9 @@ const ( // Tendermint is used to indicate that the client uses the Tendermint Consensus Algorithm. Tendermint string = "07-tendermint" + // Dymint is used to indicate that the client is a Dymension rollapp. + Dymint string = "01-dymint" + // Localhost is the client type for a localhost client. It is also used as the clientID // for the localhost client. 
Localhost string = "09-localhost" @@ -241,3 +246,19 @@ type GenesisMetadata interface { func (s Status) String() string { return string(s) } + +// SelfClient is an interface to create the chains' self client logic +type SelfClient interface { + ClientType() string + + ValidateSelfClientState( + ctx sdk.Context, + expectedUbdPeriod time.Duration, + clientState ClientState, + ) error + + GetSelfConsensusStateFromBlocHeader( + cdc codec.BinaryCodec, + blockHeader []byte, + ) (ConsensusState, error) +} diff --git a/modules/core/ibc_msg_interceptor.go b/modules/core/ibc_msg_interceptor.go new file mode 100644 index 00000000000..c72812a0328 --- /dev/null +++ b/modules/core/ibc_msg_interceptor.go @@ -0,0 +1,77 @@ +package ibc + +import ( + "context" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + connectiontypes "github.com/cosmos/ibc-go/v5/modules/core/03-connection/types" + channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" +) + +type IBCMsgI interface { + ///////////////////////////////////////////////////////////////////////////// + // Keeper + ///////////////////////////////////////////////////////////////////////////// + + // CreateClient defines a rpc handler method for MsgCreateClient. + CreateClient(goCtx context.Context, msg *clienttypes.MsgCreateClient) (*clienttypes.MsgCreateClientResponse, error) + + // UpdateClient defines a rpc handler method for MsgUpdateClient. + UpdateClient(goCtx context.Context, msg *clienttypes.MsgUpdateClient) (*clienttypes.MsgUpdateClientResponse, error) + + // UpgradeClient defines a rpc handler method for MsgUpgradeClient. + UpgradeClient(goCtx context.Context, msg *clienttypes.MsgUpgradeClient) (*clienttypes.MsgUpgradeClientResponse, error) + + // SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour. + SubmitMisbehaviour(goCtx context.Context, msg *clienttypes.MsgSubmitMisbehaviour) (*clienttypes.MsgSubmitMisbehaviourResponse, error) + + // ConnectionOpenInit defines a rpc handler method for MsgConnectionOpenInit. + ConnectionOpenInit(goCtx context.Context, msg *connectiontypes.MsgConnectionOpenInit) (*connectiontypes.MsgConnectionOpenInitResponse, error) + + // ConnectionOpenTry defines a rpc handler method for MsgConnectionOpenTry. + ConnectionOpenTry(goCtx context.Context, msg *connectiontypes.MsgConnectionOpenTry) (*connectiontypes.MsgConnectionOpenTryResponse, error) + + // ConnectionOpenAck defines a rpc handler method for MsgConnectionOpenAck. + ConnectionOpenAck(goCtx context.Context, msg *connectiontypes.MsgConnectionOpenAck) (*connectiontypes.MsgConnectionOpenAckResponse, error) + + // ConnectionOpenConfirm defines a rpc handler method for MsgConnectionOpenConfirm. + ConnectionOpenConfirm(goCtx context.Context, msg *connectiontypes.MsgConnectionOpenConfirm) (*connectiontypes.MsgConnectionOpenConfirmResponse, error) + + // ChannelOpenInit defines a rpc handler method for MsgChannelOpenInit. + // ChannelOpenInit will perform 04-channel checks, route to the application + // callback, and write an OpenInit channel into state upon successful execution. + ChannelOpenInit(goCtx context.Context, msg *channeltypes.MsgChannelOpenInit) (*channeltypes.MsgChannelOpenInitResponse, error) + + // ChannelOpenTry defines a rpc handler method for MsgChannelOpenTry. + // ChannelOpenTry will perform 04-channel checks, route to the application + // callback, and write an OpenTry channel into state upon successful execution. 
+ ChannelOpenTry(goCtx context.Context, msg *channeltypes.MsgChannelOpenTry) (*channeltypes.MsgChannelOpenTryResponse, error) + + // ChannelOpenAck defines a rpc handler method for MsgChannelOpenAck. + // ChannelOpenAck will perform 04-channel checks, route to the application + // callback, and write an OpenAck channel into state upon successful execution. + ChannelOpenAck(goCtx context.Context, msg *channeltypes.MsgChannelOpenAck) (*channeltypes.MsgChannelOpenAckResponse, error) + + // ChannelOpenConfirm defines a rpc handler method for MsgChannelOpenConfirm. + // ChannelOpenConfirm will perform 04-channel checks, route to the application + // callback, and write an OpenConfirm channel into state upon successful execution. + ChannelOpenConfirm(goCtx context.Context, msg *channeltypes.MsgChannelOpenConfirm) (*channeltypes.MsgChannelOpenConfirmResponse, error) + + // ChannelCloseInit defines a rpc handler method for MsgChannelCloseInit. + ChannelCloseInit(goCtx context.Context, msg *channeltypes.MsgChannelCloseInit) (*channeltypes.MsgChannelCloseInitResponse, error) + + // ChannelCloseConfirm defines a rpc handler method for MsgChannelCloseConfirm. + ChannelCloseConfirm(goCtx context.Context, msg *channeltypes.MsgChannelCloseConfirm) (*channeltypes.MsgChannelCloseConfirmResponse, error) + + // RecvPacket defines a rpc handler method for MsgRecvPacket. + RecvPacket(goCtx context.Context, msg *channeltypes.MsgRecvPacket) (*channeltypes.MsgRecvPacketResponse, error) + + // Timeout defines a rpc handler method for MsgTimeout. + Timeout(goCtx context.Context, msg *channeltypes.MsgTimeout) (*channeltypes.MsgTimeoutResponse, error) + + // TimeoutOnClose defines a rpc handler method for MsgTimeoutOnClose. + TimeoutOnClose(goCtx context.Context, msg *channeltypes.MsgTimeoutOnClose) (*channeltypes.MsgTimeoutOnCloseResponse, error) + + // Acknowledgement defines a rpc handler method for MsgAcknowledgement. 
+ Acknowledgement(goCtx context.Context, msg *channeltypes.MsgAcknowledgement) (*channeltypes.MsgAcknowledgementResponse, error) +} diff --git a/modules/core/keeper/keeper.go b/modules/core/keeper/keeper.go index 65b23fc2339..8d7e2c58519 100644 --- a/modules/core/keeper/keeper.go +++ b/modules/core/keeper/keeper.go @@ -28,7 +28,7 @@ type Keeper struct { cdc codec.BinaryCodec - ClientKeeper clientkeeper.Keeper + ClientKeeper clientkeeper.KeeperI ConnectionKeeper connectionkeeper.Keeper ChannelKeeper channelkeeper.Keeper PortKeeper portkeeper.Keeper diff --git a/modules/core/keeper/keeper_dymint.go b/modules/core/keeper/keeper_dymint.go new file mode 100644 index 00000000000..9f28877a6cc --- /dev/null +++ b/modules/core/keeper/keeper_dymint.go @@ -0,0 +1,60 @@ +package keeper + +import ( + "fmt" + "reflect" + + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" + + clientkeeper "github.com/cosmos/ibc-go/v5/modules/core/02-client/keeper" + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + connectionkeeper "github.com/cosmos/ibc-go/v5/modules/core/03-connection/keeper" + connectiontypes "github.com/cosmos/ibc-go/v5/modules/core/03-connection/types" + channelkeeper "github.com/cosmos/ibc-go/v5/modules/core/04-channel/keeper" + portkeeper "github.com/cosmos/ibc-go/v5/modules/core/05-port/keeper" +) + +// NewKeeper creates a new ibc Keeper +func NewKeeperWithDymint( + cdc codec.BinaryCodec, key storetypes.StoreKey, paramSpace paramtypes.Subspace, + stakingKeeper clienttypes.StakingKeeper, upgradeKeeper clienttypes.UpgradeKeeper, + scopedKeeper capabilitykeeper.ScopedKeeper, +) *Keeper { + + // register paramSpace at top level keeper + // set KeyTable if it has not already been set + if !paramSpace.HasKeyTable() { + keyTable := clienttypes.ParamKeyTable() + keyTable.RegisterParamSet(&connectiontypes.Params{}) + paramSpace = paramSpace.WithKeyTable(keyTable) + } + + // panic if any of the keepers passed in is empty + if reflect.ValueOf(stakingKeeper).IsZero() { + panic(fmt.Errorf("cannot initialize IBC keeper: empty staking keeper")) + } + + if reflect.ValueOf(upgradeKeeper).IsZero() { + panic(fmt.Errorf("cannot initialize IBC keeper: empty upgrade keeper")) + } + + if reflect.DeepEqual(capabilitykeeper.ScopedKeeper{}, scopedKeeper) { + panic(fmt.Errorf("cannot initialize IBC keeper: empty scoped keeper")) + } + + clientKeeper := clientkeeper.NewDymintKeeper(cdc, key, paramSpace, stakingKeeper, upgradeKeeper) + connectionKeeper := connectionkeeper.NewKeeper(cdc, key, paramSpace, clientKeeper) + portKeeper := portkeeper.NewKeeper(scopedKeeper) + channelKeeper := channelkeeper.NewKeeper(cdc, key, clientKeeper, connectionKeeper, portKeeper, scopedKeeper) + + return &Keeper{ + cdc: cdc, + ClientKeeper: clientKeeper, + ConnectionKeeper: connectionKeeper, + ChannelKeeper: channelKeeper, + PortKeeper: portKeeper, + } +} diff --git a/modules/core/keeper/migrations.go b/modules/core/keeper/migrations.go index ca56594b113..cbcfd0ea157 100644 --- a/modules/core/keeper/migrations.go +++ b/modules/core/keeper/migrations.go @@ -1,6 +1,8 @@ package keeper import ( + "errors" + sdk "github.com/cosmos/cosmos-sdk/types" clientkeeper "github.com/cosmos/ibc-go/v5/modules/core/02-client/keeper" @@ -23,7 +25,13 @@ func NewMigrator(keeper Keeper) Migrator { // - prunes expired tendermint consensus states // - adds ProcessedHeight 
and Iteration keys for unexpired tendermint consensus states func (m Migrator) Migrate1to2(ctx sdk.Context) error { - clientMigrator := clientkeeper.NewMigrator(m.keeper.ClientKeeper) + + //FIXME: support different kinds of clients + cl, ok := m.keeper.ClientKeeper.(clientkeeper.Keeper) + if !ok { + return errors.New("client keeper is not a keeper.Keeper") + } + clientMigrator := clientkeeper.NewMigrator(cl) if err := clientMigrator.Migrate1to2(ctx); err != nil { return err } diff --git a/modules/core/module.go b/modules/core/module.go index 19b5d445fb7..00be3d6b386 100644 --- a/modules/core/module.go +++ b/modules/core/module.go @@ -97,16 +97,27 @@ func (AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry) // AppModule implements an application module for the ibc module. type AppModule struct { AppModuleBasic - keeper *keeper.Keeper + keeper *keeper.Keeper + msgSrvInterceptor IBCMsgI // create localhost by default createLocalhost bool } +// NewAppModuleWithMsgInterceptor creates a new AppModule object +// with customized message server for IBC messages +func NewAppModuleWithMsgInterceptor(k *keeper.Keeper, msgInterceptor IBCMsgI) AppModule { + return AppModule{ + keeper: k, + msgSrvInterceptor: msgInterceptor, + } +} + // NewAppModule creates a new AppModule object func NewAppModule(k *keeper.Keeper) AppModule { return AppModule{ - keeper: k, + keeper: k, + msgSrvInterceptor: k, } } @@ -137,9 +148,10 @@ func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sd // RegisterServices registers module services. func (am AppModule) RegisterServices(cfg module.Configurator) { - clienttypes.RegisterMsgServer(cfg.MsgServer(), am.keeper) - connectiontypes.RegisterMsgServer(cfg.MsgServer(), am.keeper) - channeltypes.RegisterMsgServer(cfg.MsgServer(), am.keeper) + clienttypes.RegisterMsgServer(cfg.MsgServer(), am.msgSrvInterceptor) + connectiontypes.RegisterMsgServer(cfg.MsgServer(), am.msgSrvInterceptor) + channeltypes.RegisterMsgServer(cfg.MsgServer(), am.msgSrvInterceptor) + types.RegisterQueryService(cfg.QueryServer(), am.keeper) m := clientkeeper.NewMigrator(am.keeper.ClientKeeper) diff --git a/modules/core/types/codec.go b/modules/core/types/codec.go index d597141005f..83abfb549a3 100644 --- a/modules/core/types/codec.go +++ b/modules/core/types/codec.go @@ -7,6 +7,7 @@ import ( connectiontypes "github.com/cosmos/ibc-go/v5/modules/core/03-connection/types" channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + ibcdmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" solomachinetypes "github.com/cosmos/ibc-go/v5/modules/light-clients/06-solomachine/types" ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" localhosttypes "github.com/cosmos/ibc-go/v5/modules/light-clients/09-localhost/types" @@ -17,6 +18,7 @@ func RegisterInterfaces(registry codectypes.InterfaceRegistry) { clienttypes.RegisterInterfaces(registry) connectiontypes.RegisterInterfaces(registry) channeltypes.RegisterInterfaces(registry) + ibcdmtypes.RegisterInterfaces(registry) solomachinetypes.RegisterInterfaces(registry) ibctmtypes.RegisterInterfaces(registry) localhosttypes.RegisterInterfaces(registry) diff --git a/modules/light-clients/01-dymint/doc.go b/modules/light-clients/01-dymint/doc.go new file mode 100644 index 00000000000..e0ac5ac9907 --- /dev/null +++ b/modules/light-clients/01-dymint/doc.go @@ -0,0 +1,5 @@ +/* +Package dymint 
implements a concrete `ConsensusState`, `Header`, +`Misbehaviour` and `Equivocation` types for the dymint consensus light client. +*/ +package dymint diff --git a/modules/light-clients/01-dymint/module.go b/modules/light-clients/01-dymint/module.go new file mode 100644 index 00000000000..439d5dabd14 --- /dev/null +++ b/modules/light-clients/01-dymint/module.go @@ -0,0 +1,10 @@ +package dymint + +import ( + "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" +) + +// Name returns the IBC client name +func Name() string { + return types.SubModuleName +} diff --git a/modules/light-clients/01-dymint/types/client_state.go b/modules/light-clients/01-dymint/types/client_state.go new file mode 100644 index 00000000000..8d1bcff0c07 --- /dev/null +++ b/modules/light-clients/01-dymint/types/client_state.go @@ -0,0 +1,565 @@ +package types + +import ( + "strings" + "time" + + ics23 "github.com/confio/ics23/go" + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + tmtypes "github.com/tendermint/tendermint/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + connectiontypes "github.com/cosmos/ibc-go/v5/modules/core/03-connection/types" + channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" + commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + host "github.com/cosmos/ibc-go/v5/modules/core/24-host" + "github.com/cosmos/ibc-go/v5/modules/core/exported" +) + +var _ exported.ClientState = (*ClientState)(nil) + +// NewClientState creates a new ClientState instance +func NewClientState( + chainID string, + trustingPeriod, maxClockDrift time.Duration, + latestHeight clienttypes.Height, specs []*ics23.ProofSpec, + upgradePath []string, +) *ClientState { + return &ClientState{ + ChainId: chainID, + TrustingPeriod: trustingPeriod, + MaxClockDrift: maxClockDrift, + LatestHeight: latestHeight, + FrozenHeight: clienttypes.ZeroHeight(), + ProofSpecs: specs, + UpgradePath: upgradePath, + } +} + +// GetChainID returns the chain-id +func (cs ClientState) GetChainID() string { + return cs.ChainId +} + +// ClientType is dymint. +func (cs ClientState) ClientType() string { + return exported.Dymint +} + +// GetLatestHeight returns latest block height. +func (cs ClientState) GetLatestHeight() exported.Height { + return cs.LatestHeight +} + +// Status returns the status of the dymint client. +// The client may be: +// - Active: FrozenHeight is zero and client is not expired +// - Frozen: Frozen Height is not zero +// - Expired: the latest consensus state timestamp + trusting period <= current time +// +// A frozen client will become expired, so the Frozen status +// has higher precedence. 
+func (cs ClientState) Status( + ctx sdk.Context, + clientStore sdk.KVStore, + cdc codec.BinaryCodec, +) exported.Status { + if !cs.FrozenHeight.IsZero() { + return exported.Frozen + } + + // get latest consensus state from clientStore to check for expiry + consState, err := GetConsensusState(clientStore, cdc, cs.GetLatestHeight()) + if err != nil { + // if the client state does not have an associated consensus state for its latest height + // then it must be expired + return exported.Expired + } + + if cs.IsExpired(consState.Timestamp, ctx.BlockTime()) { + return exported.Expired + } + + return exported.Active +} + +// IsExpired returns whether or not the client has passed the trusting period since the last +// update (in which case no headers are considered valid). +func (cs ClientState) IsExpired(latestTimestamp, now time.Time) bool { + expirationTime := latestTimestamp.Add(cs.TrustingPeriod) + return !expirationTime.After(now) +} + +// Validate performs a basic validation of the client state fields. +func (cs ClientState) Validate() error { + if strings.TrimSpace(cs.ChainId) == "" { + return sdkerrors.Wrap(ErrInvalidChainID, "chain id cannot be empty string") + } + + // NOTE: the value of tmtypes.MaxChainIDLen may change in the future. + // If this occurs, the code here must account for potential difference + // between the dymint version being run by the counterparty chain + // and the dymint version used by this light client. + // https://github.com/cosmos/ibc-go/issues/177 + if len(cs.ChainId) > tmtypes.MaxChainIDLen { + return sdkerrors.Wrapf(ErrInvalidChainID, "chainID is too long; got: %d, max: %d", len(cs.ChainId), tmtypes.MaxChainIDLen) + } + + if cs.TrustingPeriod == 0 { + return sdkerrors.Wrap(ErrInvalidTrustingPeriod, "trusting period cannot be zero") + } + if cs.MaxClockDrift == 0 { + return sdkerrors.Wrap(ErrInvalidMaxClockDrift, "max clock drift cannot be zero") + } + + // the latest height revision number must match the chain id revision number + if cs.LatestHeight.RevisionNumber != clienttypes.ParseChainID(cs.ChainId) { + return sdkerrors.Wrapf(ErrInvalidHeaderHeight, + "latest height revision number must match chain id revision number (%d != %d)", cs.LatestHeight.RevisionNumber, clienttypes.ParseChainID(cs.ChainId)) + } + if cs.LatestHeight.RevisionHeight == 0 { + return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "dymint client's latest height revision height cannot be zero") + } + + if cs.ProofSpecs == nil { + return sdkerrors.Wrap(ErrInvalidProofSpecs, "proof specs cannot be nil for tm client") + } + for i, spec := range cs.ProofSpecs { + if spec == nil { + return sdkerrors.Wrapf(ErrInvalidProofSpecs, "proof spec cannot be nil at index: %d", i) + } + } + // UpgradePath may be empty, but if it isn't, each key must be non-empty + for i, k := range cs.UpgradePath { + if strings.TrimSpace(k) == "" { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "key in upgrade path at index %d cannot be empty", i) + } + } + + return nil +} + +// GetProofSpecs returns the format the client expects for proof verification +// as a string array specifying the proof type for each position in chained proof +func (cs ClientState) GetProofSpecs() []*ics23.ProofSpec { + return cs.ProofSpecs +} + +// ZeroCustomFields returns a ClientState that is a copy of the current ClientState +// with all client customizable fields zeroed out +func (cs ClientState) ZeroCustomFields() exported.ClientState { + // copy over all chain-specified fields + // and leave custom fields empty + return &ClientState{ 
+ ChainId: cs.ChainId, + LatestHeight: cs.LatestHeight, + ProofSpecs: cs.ProofSpecs, + UpgradePath: cs.UpgradePath, + } +} + +// Initialize will check that initial consensus state is a Dymint consensus state +// and will store ProcessedTime for initial consensus state as ctx.BlockTime() +func (cs ClientState) Initialize(ctx sdk.Context, _ codec.BinaryCodec, clientStore sdk.KVStore, consState exported.ConsensusState) error { + if _, ok := consState.(*ConsensusState); !ok { + return sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "invalid initial consensus state. expected type: %T, got: %T", + &ConsensusState{}, consState) + } + // set metadata for initial consensus state. + setConsensusMetadata(ctx, clientStore, cs.GetLatestHeight()) + return nil +} + +// VerifyClientState verifies a proof of the client state of the running chain +// stored on the target machine +func (cs ClientState) VerifyClientState( + store sdk.KVStore, + cdc codec.BinaryCodec, + height exported.Height, + prefix exported.Prefix, + counterpartyClientIdentifier string, + proof []byte, + clientState exported.ClientState, +) error { + merkleProof, provingConsensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + clientPrefixedPath := commitmenttypes.NewMerklePath(host.FullClientStatePath(counterpartyClientIdentifier)) + path, err := commitmenttypes.ApplyPrefix(prefix, clientPrefixedPath) + if err != nil { + return err + } + + if clientState == nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidClient, "client state cannot be empty") + } + + // _, ok := clientState.(*ClientState) + // if !ok { + // return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "invalid client type %T, expected %T", clientState, &ClientState{}) + // } + + bz, err := cdc.MarshalInterface(clientState) + if err != nil { + return err + } + + return merkleProof.VerifyMembership(cs.ProofSpecs, provingConsensusState.GetRoot(), path, bz) +} + +// VerifyClientConsensusState verifies a proof of the consensus state of the +// Dymint client stored on the target machine. +func (cs ClientState) VerifyClientConsensusState( + store sdk.KVStore, + cdc codec.BinaryCodec, + height exported.Height, + counterpartyClientIdentifier string, + consensusHeight exported.Height, + prefix exported.Prefix, + proof []byte, + consensusState exported.ConsensusState, +) error { + merkleProof, provingConsensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + clientPrefixedPath := commitmenttypes.NewMerklePath(host.FullConsensusStatePath(counterpartyClientIdentifier, consensusHeight)) + path, err := commitmenttypes.ApplyPrefix(prefix, clientPrefixedPath) + if err != nil { + return err + } + + if consensusState == nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "consensus state cannot be empty") + } + + // _, ok := consensusState.(*ConsensusState) + // if !ok { + // return sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "invalid consensus type %T, expected %T", consensusState, &ConsensusState{}) + // } + + bz, err := cdc.MarshalInterface(consensusState) + if err != nil { + return err + } + + if err := merkleProof.VerifyMembership(cs.ProofSpecs, provingConsensusState.GetRoot(), path, bz); err != nil { + return err + } + + return nil +} + +// VerifyConnectionState verifies a proof of the connection state of the +// specified connection end stored on the target machine. 
+func (cs ClientState) VerifyConnectionState( + store sdk.KVStore, + cdc codec.BinaryCodec, + height exported.Height, + prefix exported.Prefix, + proof []byte, + connectionID string, + connectionEnd exported.ConnectionI, +) error { + merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + connectionPath := commitmenttypes.NewMerklePath(host.ConnectionPath(connectionID)) + path, err := commitmenttypes.ApplyPrefix(prefix, connectionPath) + if err != nil { + return err + } + + connection, ok := connectionEnd.(connectiontypes.ConnectionEnd) + if !ok { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidType, "invalid connection type %T", connectionEnd) + } + + bz, err := cdc.Marshal(&connection) + if err != nil { + return err + } + + if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, bz); err != nil { + return err + } + + return nil +} + +// VerifyChannelState verifies a proof of the channel state of the specified +// channel end, under the specified port, stored on the target machine. +func (cs ClientState) VerifyChannelState( + store sdk.KVStore, + cdc codec.BinaryCodec, + height exported.Height, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + channel exported.ChannelI, +) error { + merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + channelPath := commitmenttypes.NewMerklePath(host.ChannelPath(portID, channelID)) + path, err := commitmenttypes.ApplyPrefix(prefix, channelPath) + if err != nil { + return err + } + + channelEnd, ok := channel.(channeltypes.Channel) + if !ok { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidType, "invalid channel type %T", channel) + } + + bz, err := cdc.Marshal(&channelEnd) + if err != nil { + return err + } + + if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, bz); err != nil { + return err + } + + return nil +} + +// VerifyPacketCommitment verifies a proof of an outgoing packet commitment at +// the specified port, specified channel, and specified sequence. +func (cs ClientState) VerifyPacketCommitment( + ctx sdk.Context, + store sdk.KVStore, + cdc codec.BinaryCodec, + height exported.Height, + delayTimePeriod uint64, + delayBlockPeriod uint64, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + sequence uint64, + commitmentBytes []byte, +) error { + merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + // check delay period has passed + if err := verifyDelayPeriodPassed(ctx, store, height, delayTimePeriod, delayBlockPeriod); err != nil { + return err + } + + commitmentPath := commitmenttypes.NewMerklePath(host.PacketCommitmentPath(portID, channelID, sequence)) + path, err := commitmenttypes.ApplyPrefix(prefix, commitmentPath) + if err != nil { + return err + } + + if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, commitmentBytes); err != nil { + return err + } + + return nil +} + +// VerifyPacketAcknowledgement verifies a proof of an incoming packet +// acknowledgement at the specified port, specified channel, and specified sequence. 
+func (cs ClientState) VerifyPacketAcknowledgement( + ctx sdk.Context, + store sdk.KVStore, + cdc codec.BinaryCodec, + height exported.Height, + delayTimePeriod uint64, + delayBlockPeriod uint64, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + sequence uint64, + acknowledgement []byte, +) error { + merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + // check delay period has passed + if err := verifyDelayPeriodPassed(ctx, store, height, delayTimePeriod, delayBlockPeriod); err != nil { + return err + } + + ackPath := commitmenttypes.NewMerklePath(host.PacketAcknowledgementPath(portID, channelID, sequence)) + path, err := commitmenttypes.ApplyPrefix(prefix, ackPath) + if err != nil { + return err + } + + if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, channeltypes.CommitAcknowledgement(acknowledgement)); err != nil { + return err + } + + return nil +} + +// VerifyPacketReceiptAbsence verifies a proof of the absence of an +// incoming packet receipt at the specified port, specified channel, and +// specified sequence. +func (cs ClientState) VerifyPacketReceiptAbsence( + ctx sdk.Context, + store sdk.KVStore, + cdc codec.BinaryCodec, + height exported.Height, + delayTimePeriod uint64, + delayBlockPeriod uint64, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + sequence uint64, +) error { + merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + // check delay period has passed + if err := verifyDelayPeriodPassed(ctx, store, height, delayTimePeriod, delayBlockPeriod); err != nil { + return err + } + + receiptPath := commitmenttypes.NewMerklePath(host.PacketReceiptPath(portID, channelID, sequence)) + path, err := commitmenttypes.ApplyPrefix(prefix, receiptPath) + if err != nil { + return err + } + + if err := merkleProof.VerifyNonMembership(cs.ProofSpecs, consensusState.GetRoot(), path); err != nil { + return err + } + + return nil +} + +// VerifyNextSequenceRecv verifies a proof of the next sequence number to be +// received of the specified channel at the specified port. +func (cs ClientState) VerifyNextSequenceRecv( + ctx sdk.Context, + store sdk.KVStore, + cdc codec.BinaryCodec, + height exported.Height, + delayTimePeriod uint64, + delayBlockPeriod uint64, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + nextSequenceRecv uint64, +) error { + merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + // check delay period has passed + if err := verifyDelayPeriodPassed(ctx, store, height, delayTimePeriod, delayBlockPeriod); err != nil { + return err + } + + nextSequenceRecvPath := commitmenttypes.NewMerklePath(host.NextSequenceRecvPath(portID, channelID)) + path, err := commitmenttypes.ApplyPrefix(prefix, nextSequenceRecvPath) + if err != nil { + return err + } + + bz := sdk.Uint64ToBigEndian(nextSequenceRecv) + + if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, bz); err != nil { + return err + } + + return nil +} + +// verifyDelayPeriodPassed will ensure that at least delayTimePeriod amount of time and delayBlockPeriod number of blocks have passed +// since consensus state was submitted before allowing verification to continue. 
+func verifyDelayPeriodPassed(ctx sdk.Context, store sdk.KVStore, proofHeight exported.Height, delayTimePeriod, delayBlockPeriod uint64) error {
+	// check that executing chain's timestamp has passed consensusState's processed time + delay time period
+	processedTime, ok := GetProcessedTime(store, proofHeight)
+	if !ok {
+		return sdkerrors.Wrapf(ErrProcessedTimeNotFound, "processed time not found for height: %s", proofHeight)
+	}
+	currentTimestamp := uint64(ctx.BlockTime().UnixNano())
+	validTime := processedTime + delayTimePeriod
+	// NOTE: delay time period is inclusive, so if currentTimestamp is validTime, then we return no error
+	if currentTimestamp < validTime {
+		return sdkerrors.Wrapf(ErrDelayPeriodNotPassed, "cannot verify packet until time: %d, current time: %d",
+			validTime, currentTimestamp)
+	}
+	// check that executing chain's height has passed consensusState's processed height + delay block period
+	processedHeight, ok := GetProcessedHeight(store, proofHeight)
+	if !ok {
+		return sdkerrors.Wrapf(ErrProcessedHeightNotFound, "processed height not found for height: %s", proofHeight)
+	}
+	currentHeight := clienttypes.GetSelfHeight(ctx)
+	validHeight := clienttypes.NewHeight(processedHeight.GetRevisionNumber(), processedHeight.GetRevisionHeight()+delayBlockPeriod)
+	// NOTE: delay block period is inclusive, so if currentHeight is validHeight, then we return no error
+	if currentHeight.LT(validHeight) {
+		return sdkerrors.Wrapf(ErrDelayPeriodNotPassed, "cannot verify packet until height: %s, current height: %s",
+			validHeight, currentHeight)
+	}
+	return nil
+}
+
+// produceVerificationArgs performs the basic checks on the arguments that are
+// shared between the verification functions and returns the unmarshalled
+// merkle proof, the consensus state and an error if one occurred.
+func produceVerificationArgs( + store sdk.KVStore, + cdc codec.BinaryCodec, + cs ClientState, + height exported.Height, + prefix exported.Prefix, + proof []byte, +) (merkleProof commitmenttypes.MerkleProof, consensusState *ConsensusState, err error) { + if cs.GetLatestHeight().LT(height) { + return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrapf( + sdkerrors.ErrInvalidHeight, + "client state height < proof height (%d < %d), please ensure the client has been updated", cs.GetLatestHeight(), height, + ) + } + + if prefix == nil { + return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(commitmenttypes.ErrInvalidPrefix, "prefix cannot be empty") + } + + _, ok := prefix.(*commitmenttypes.MerklePrefix) + if !ok { + return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrapf(commitmenttypes.ErrInvalidPrefix, "invalid prefix type %T, expected *MerklePrefix", prefix) + } + + if proof == nil { + return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "proof cannot be empty") + } + + if err = cdc.Unmarshal(proof, &merkleProof); err != nil { + return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "failed to unmarshal proof into commitment merkle proof") + } + + consensusState, err = GetConsensusState(store, cdc, height) + if err != nil { + return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(err, "please ensure the proof was constructed against a height that exists on the client") + } + + return merkleProof, consensusState, nil +} diff --git a/modules/light-clients/01-dymint/types/client_state_test.go b/modules/light-clients/01-dymint/types/client_state_test.go new file mode 100644 index 00000000000..6cfc7b7c704 --- /dev/null +++ b/modules/light-clients/01-dymint/types/client_state_test.go @@ -0,0 +1,980 @@ +package types_test + +import ( + "time" + + ics23 "github.com/confio/ics23/go" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types" + commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + host "github.com/cosmos/ibc-go/v5/modules/core/24-host" + "github.com/cosmos/ibc-go/v5/modules/core/exported" + "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" + ibctesting "github.com/cosmos/ibc-go/v5/testing" + ibcmock "github.com/cosmos/ibc-go/v5/testing/mock" +) + +const ( + testClientID = "clientidone" + testConnectionID = "connectionid" + testPortID = "testportid" + testChannelID = "testchannelid" + testSequence = 1 + + // Do not change the length of these variables + fiftyCharChainID = "12345678901234567890123456789012345678901234567890" + fiftyOneCharChainID = "123456789012345678901234567890123456789012345678901" +) + +var invalidProof = []byte("invalid proof") + +func (suite *DymintTestSuite) TestStatus() { + var ( + path *ibctesting.Path + clientState *types.ClientState + dymintCounterpartyChain *ibctesting.TestChain + endpoint *ibctesting.Endpoint + ) + + testCases := []struct { + name string + malleate func() + expStatus exported.Status + }{ + {"client is active", func() {}, exported.Active}, + {"client is frozen", func() { + clientState.FrozenHeight = clienttypes.NewHeight(0, 1) + endpoint.SetClientState(clientState) + }, exported.Frozen}, + {"client status without consensus state", func() { + clientState.LatestHeight = clientState.LatestHeight.Increment().(clienttypes.Height) + endpoint.SetClientState(clientState) + }, exported.Expired}, + {"client status is expired", func() 
{ + suite.coordinator.IncrementTimeBy(clientState.TrustingPeriod) + }, exported.Expired}, + } + + for _, tc := range testCases { + path = ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.SetupClients(path) + + if suite.chainB.TestChainClient.GetSelfClientType() == exported.Tendermint { + // chainA must be Dymint + dymintCounterpartyChain = suite.chainB + endpoint = path.EndpointB + } else { + // chainB must be Dymint + dymintCounterpartyChain = suite.chainA + endpoint = path.EndpointA + } + + clientStore := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + clientState = endpoint.GetClientState().(*types.ClientState) + + tc.malleate() + + status := clientState.Status(dymintCounterpartyChain.GetContext(), clientStore, dymintCounterpartyChain.App.AppCodec()) + suite.Require().Equal(tc.expStatus, status) + + } +} + +func (suite *DymintTestSuite) TestValidate() { + testCases := []struct { + name string + clientState *types.ClientState + expPass bool + }{ + { + name: "valid client", + clientState: types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + expPass: true, + }, + { + name: "valid client with nil upgrade path", + clientState: types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), nil), + expPass: true, + }, + { + name: "invalid chainID", + clientState: types.NewClientState(" ", trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + expPass: false, + }, + { + // NOTE: if this test fails, the code must account for the change in chainID length across tendermint versions! + // Do not only fix the test, fix the code! + // https://github.com/cosmos/ibc-go/issues/177 + name: "valid chainID - chainID validation failed for chainID of length 50! ", + clientState: types.NewClientState(fiftyCharChainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + expPass: true, + }, + { + // NOTE: if this test fails, the code must account for the change in chainID length across tendermint versions! + // Do not only fix the test, fix the code! + // https://github.com/cosmos/ibc-go/issues/177 + name: "invalid chainID - chainID validation did not fail for chainID of length 51! 
", + clientState: types.NewClientState(fiftyOneCharChainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + expPass: false, + }, + { + name: "invalid trusting period", + clientState: types.NewClientState(chainID, 0, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + expPass: false, + }, + { + name: "invalid max clock drift", + clientState: types.NewClientState(chainID, trustingPeriod, 0, height, commitmenttypes.GetSDKSpecs(), upgradePath), + expPass: false, + }, + { + name: "invalid revision number", + clientState: types.NewClientState(chainID, trustingPeriod, maxClockDrift, clienttypes.NewHeight(1, 1), commitmenttypes.GetSDKSpecs(), upgradePath), + expPass: false, + }, + { + name: "invalid revision height", + clientState: types.NewClientState(chainID, trustingPeriod, maxClockDrift, clienttypes.ZeroHeight(), commitmenttypes.GetSDKSpecs(), upgradePath), + expPass: false, + }, + { + name: "proof specs is nil", + clientState: types.NewClientState(chainID, ubdPeriod, maxClockDrift, height, nil, upgradePath), + expPass: false, + }, + { + name: "proof specs contains nil", + clientState: types.NewClientState(chainID, ubdPeriod, maxClockDrift, height, []*ics23.ProofSpec{ics23.TendermintSpec, nil}, upgradePath), + expPass: false, + }, + } + + for _, tc := range testCases { + err := tc.clientState.Validate() + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} + +func (suite *DymintTestSuite) TestInitialize() { + var ( + dymintCounterpartyChain *ibctesting.TestChain + endpoint *ibctesting.Endpoint + ) + testCases := []struct { + name string + consensusState exported.ConsensusState + expPass bool + }{ + { + name: "valid consensus", + consensusState: &types.ConsensusState{}, + expPass: true, + }, + { + name: "invalid consensus: consensus state is solomachine consensus", + consensusState: ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).ConsensusState(), + expPass: false, + }, + } + + path := ibctesting.NewPath(suite.chainA, suite.chainB) + if suite.chainB.TestChainClient.GetSelfClientType() == exported.Tendermint { + // chainA must be Dymint + dymintCounterpartyChain = suite.chainB + endpoint = path.EndpointB + } else { + // chainB must be Dymint + dymintCounterpartyChain = suite.chainA + endpoint = path.EndpointA + } + err := endpoint.CreateClient() + suite.Require().NoError(err) + + clientState := dymintCounterpartyChain.GetClientState(endpoint.ClientID) + store := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + + for _, tc := range testCases { + err := clientState.Initialize(dymintCounterpartyChain.GetContext(), dymintCounterpartyChain.Codec, store, tc.consensusState) + if tc.expPass { + suite.Require().NoError(err, "valid case returned an error") + } else { + suite.Require().Error(err, "invalid case didn't return an error") + } + } +} + +func (suite *DymintTestSuite) TestVerifyClientConsensusState() { + testCases := []struct { + name string + clientState *types.ClientState + consensusState *types.ConsensusState + prefix commitmenttypes.MerklePrefix + proof []byte + expPass bool + }{ + // FIXME: uncomment + // { + // name: "successful verification", + // clientState: types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs()), + // consensusState: types.ConsensusState{ + // Root: 
commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), + // }, + // prefix: commitmenttypes.NewMerklePrefix([]byte("ibc")), + // expPass: true, + // }, + { + name: "ApplyPrefix failed", + clientState: types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + consensusState: &types.ConsensusState{ + Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), + }, + prefix: commitmenttypes.MerklePrefix{}, + expPass: false, + }, + { + name: "latest client height < height", + clientState: types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + consensusState: &types.ConsensusState{ + Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), + }, + prefix: commitmenttypes.NewMerklePrefix([]byte("ibc")), + expPass: false, + }, + { + name: "proof verification failed", + clientState: types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + consensusState: &types.ConsensusState{ + Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), + NextValidatorsHash: suite.valsHash, + }, + prefix: commitmenttypes.NewMerklePrefix([]byte("ibc")), + proof: []byte{}, + expPass: false, + }, + } + + for i, tc := range testCases { + tc := tc + + err := tc.clientState.VerifyClientConsensusState( + nil, suite.cdc, height, "chainA", tc.clientState.LatestHeight, tc.prefix, tc.proof, tc.consensusState, + ) + + if tc.expPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + } +} + +// test verification of the connection on chainB being represented in the +// light client on chainA +func (suite *DymintTestSuite) TestVerifyConnectionState() { + var ( + clientState *types.ClientState + proof []byte + proofHeight exported.Height + prefix commitmenttypes.MerklePrefix + dymintChain, dymintCounterpartyChain *ibctesting.TestChain + endpoint1, endpoint2 *ibctesting.Endpoint + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "successful verification", func() {}, true, + }, + { + "ApplyPrefix failed", func() { + prefix = commitmenttypes.MerklePrefix{} + }, false, + }, + { + "latest client height < height", func() { + proofHeight = clientState.LatestHeight.Increment() + }, false, + }, + { + "proof verification failed", func() { + proof = invalidProof + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTestWithConsensusType(exported.Tendermint, exported.Dymint) // reset + + // setup testing conditions + path := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.Setup(path) + + if suite.chainB.TestChainClient.GetSelfClientType() == exported.Dymint { + dymintCounterpartyChain = suite.chainA + dymintChain = suite.chainB + endpoint1 = path.EndpointA + endpoint2 = path.EndpointB + } else { + dymintCounterpartyChain = suite.chainB + dymintChain = suite.chainA + endpoint1 = path.EndpointB + endpoint2 = path.EndpointA + } + + connection := endpoint2.GetConnection() + + var ok bool + clientStateI := dymintCounterpartyChain.GetClientState(endpoint1.ClientID) + clientState, ok = clientStateI.(*types.ClientState) + suite.Require().True(ok) + + prefix = dymintChain.GetPrefix() + + // make connection proof + connectionKey := host.ConnectionKey(endpoint2.ConnectionID) + proof, proofHeight = 
dymintChain.QueryProof(connectionKey) + + tc.malleate() // make changes as necessary + + store := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), endpoint1.ClientID) + + err := clientState.VerifyConnectionState( + store, dymintCounterpartyChain.Codec, proofHeight, &prefix, proof, endpoint2.ConnectionID, connection, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// test verification of the channel on chainB being represented in the light +// client on chainA +func (suite *DymintTestSuite) TestVerifyChannelState() { + var ( + clientState *types.ClientState + proof []byte + proofHeight exported.Height + prefix commitmenttypes.MerklePrefix + dymintChain, dymintCounterpartyChain *ibctesting.TestChain + endpoint1, endpoint2 *ibctesting.Endpoint + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "successful verification", func() {}, true, + }, + { + "ApplyPrefix failed", func() { + prefix = commitmenttypes.MerklePrefix{} + }, false, + }, + { + "latest client height < height", func() { + proofHeight = clientState.LatestHeight.Increment() + }, false, + }, + { + "proof verification failed", func() { + proof = invalidProof + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTestWithConsensusType(exported.Tendermint, exported.Dymint) // reset + + // setup testing conditions + path := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.Setup(path) + + if suite.chainB.TestChainClient.GetSelfClientType() == exported.Dymint { + dymintCounterpartyChain = suite.chainA + dymintChain = suite.chainB + endpoint1 = path.EndpointA + endpoint2 = path.EndpointB + } else { + dymintCounterpartyChain = suite.chainB + dymintChain = suite.chainA + endpoint1 = path.EndpointB + endpoint2 = path.EndpointA + } + + channel := endpoint2.GetChannel() + + var ok bool + clientStateI := dymintCounterpartyChain.GetClientState(endpoint1.ClientID) + clientState, ok = clientStateI.(*types.ClientState) + suite.Require().True(ok) + + prefix = dymintChain.GetPrefix() + + // make channel proof + channelKey := host.ChannelKey(endpoint2.ChannelConfig.PortID, endpoint2.ChannelID) + proof, proofHeight = dymintChain.QueryProof(channelKey) + + tc.malleate() // make changes as necessary + + store := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), endpoint1.ClientID) + + err := clientState.VerifyChannelState( + store, dymintCounterpartyChain.Codec, proofHeight, &prefix, proof, + endpoint2.ChannelConfig.PortID, endpoint2.ChannelID, channel, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// test verification of the packet commitment on chainB being represented +// in the light client on chainA. A send from chainB to chainA is simulated. 
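The test that follows needs a packet whose commitment actually exists on the sending chain. As a rough sketch of that fixture, here is how a mock packet and the store key holding its commitment can be assembled; the port and channel identifiers are placeholders, not values from this patch.

package main

import (
	"fmt"

	clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types"
	channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types"
	host "github.com/cosmos/ibc-go/v5/modules/core/24-host"
)

func main() {
	packet := channeltypes.NewPacket(
		[]byte("mock packet data"), 1, // data, sequence
		"transfer", "channel-0", // source port and channel (placeholders)
		"transfer", "channel-1", // destination port and channel (placeholders)
		clienttypes.NewHeight(0, 100), 0, // timeout height, timeout timestamp
	)

	// Key under which the sending chain stores the packet commitment; the light
	// client proves membership of the commitment bytes at this path.
	key := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
	fmt.Printf("%s\n", key)
}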
+func (suite *DymintTestSuite) TestVerifyPacketCommitment() { + var ( + clientState *types.ClientState + proof []byte + delayTimePeriod uint64 + delayBlockPeriod uint64 + proofHeight exported.Height + prefix commitmenttypes.MerklePrefix + dymintChain, dymintCounterpartyChain *ibctesting.TestChain + endpoint1, endpoint2 *ibctesting.Endpoint + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "successful verification", func() {}, true, + }, + { + name: "delay time period has passed", + malleate: func() { + delayTimePeriod = uint64(time.Second.Nanoseconds()) + }, + expPass: true, + }, + { + name: "delay time period has not passed", + malleate: func() { + delayTimePeriod = uint64(time.Hour.Nanoseconds()) + }, + expPass: false, + }, + { + name: "delay block period has passed", + malleate: func() { + delayBlockPeriod = 1 + }, + expPass: true, + }, + { + name: "delay block period has not passed", + malleate: func() { + delayBlockPeriod = 1000 + }, + expPass: false, + }, + + { + "ApplyPrefix failed", func() { + prefix = commitmenttypes.MerklePrefix{} + }, false, + }, + { + "latest client height < height", func() { + proofHeight = clientState.LatestHeight.Increment() + }, false, + }, + { + "proof verification failed", func() { + proof = invalidProof + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + // setup testing conditions + path := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.Setup(path) + + if suite.chainB.TestChainClient.GetSelfClientType() == exported.Dymint { + dymintCounterpartyChain = suite.chainA + dymintChain = suite.chainB + endpoint1 = path.EndpointA + endpoint2 = path.EndpointB + } else { + dymintCounterpartyChain = suite.chainB + dymintChain = suite.chainA + endpoint1 = path.EndpointB + endpoint2 = path.EndpointA + } + + packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, endpoint2.ChannelConfig.PortID, endpoint2.ChannelID, endpoint1.ChannelConfig.PortID, endpoint1.ChannelID, clienttypes.NewHeight(0, 100), 0) + err := endpoint2.SendPacket(packet) + suite.Require().NoError(err) + + var ok bool + clientStateI := dymintCounterpartyChain.GetClientState(endpoint1.ClientID) + clientState, ok = clientStateI.(*types.ClientState) + suite.Require().True(ok) + + prefix = dymintChain.GetPrefix() + + // make packet commitment proof + packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + proof, proofHeight = endpoint2.QueryProof(packetKey) + + // reset time and block delays to 0, malleate may change to a specific non-zero value. + delayTimePeriod = 0 + delayBlockPeriod = 0 + tc.malleate() // make changes as necessary + + ctx := dymintCounterpartyChain.GetContext() + store := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, endpoint1.ClientID) + + commitment := channeltypes.CommitPacket(dymintCounterpartyChain.App.GetIBCKeeper().Codec(), packet) + err = clientState.VerifyPacketCommitment( + ctx, store, dymintCounterpartyChain.Codec, proofHeight, delayTimePeriod, delayBlockPeriod, &prefix, proof, + packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(), commitment, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// test verification of the acknowledgement on chainB being represented +// in the light client on chainA. A send and ack from chainA to chainB +// is simulated. 
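For acknowledgements, the receiving chain does not store the raw ack bytes; it stores the hash produced by channeltypes.CommitAcknowledgement, which is also the value VerifyPacketAcknowledgement earlier in this file checks membership for. A small standalone sketch with placeholder identifiers and example ack bytes:

package main

import (
	"fmt"

	channeltypes "github.com/cosmos/ibc-go/v5/modules/core/04-channel/types"
	host "github.com/cosmos/ibc-go/v5/modules/core/24-host"
)

func main() {
	ack := []byte(`{"result":"AQ=="}`) // example acknowledgement bytes

	// Hash actually committed to state on the receiving chain.
	commitment := channeltypes.CommitAcknowledgement(ack)

	// Store key the proof is checked against (identifiers are placeholders).
	key := host.PacketAcknowledgementKey("transfer", "channel-1", 1)

	fmt.Printf("key=%s commitment=%X\n", key, commitment)
}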
+func (suite *DymintTestSuite) TestVerifyPacketAcknowledgement() { + var ( + clientState *types.ClientState + proof []byte + delayTimePeriod uint64 + delayBlockPeriod uint64 + proofHeight exported.Height + prefix commitmenttypes.MerklePrefix + dymintCounterpartyChain, dymintChain *ibctesting.TestChain + endpoint1, endpoint2 *ibctesting.Endpoint + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "successful verification", func() {}, true, + }, + { + name: "delay time period has passed", + malleate: func() { + delayTimePeriod = uint64(time.Second.Nanoseconds()) + }, + expPass: true, + }, + { + name: "delay time period has not passed", + malleate: func() { + delayTimePeriod = uint64(time.Hour.Nanoseconds()) + }, + expPass: false, + }, + { + name: "delay block period has passed", + malleate: func() { + delayBlockPeriod = 1 + }, + expPass: true, + }, + { + name: "delay block period has not passed", + malleate: func() { + delayBlockPeriod = 10 + }, + expPass: false, + }, + + { + "ApplyPrefix failed", func() { + prefix = commitmenttypes.MerklePrefix{} + }, false, + }, + { + "latest client height < height", func() { + proofHeight = clientState.LatestHeight.Increment() + }, false, + }, + { + "proof verification failed", func() { + proof = invalidProof + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + // setup testing conditions + path := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.Setup(path) + + if suite.chainB.TestChainClient.GetSelfClientType() == exported.Dymint { + dymintCounterpartyChain = suite.chainA + dymintChain = suite.chainB + endpoint1 = path.EndpointA + endpoint2 = path.EndpointB + } else { + dymintCounterpartyChain = suite.chainB + dymintChain = suite.chainA + endpoint1 = path.EndpointB + endpoint2 = path.EndpointA + } + + packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, endpoint1.ChannelConfig.PortID, endpoint1.ChannelID, endpoint2.ChannelConfig.PortID, endpoint2.ChannelID, clienttypes.NewHeight(0, 100), 0) + + // send packet + err := endpoint1.SendPacket(packet) + suite.Require().NoError(err) + + // write receipt and ack + err = endpoint2.RecvPacket(packet) + suite.Require().NoError(err) + + var ok bool + clientStateI := dymintCounterpartyChain.GetClientState(endpoint1.ClientID) + clientState, ok = clientStateI.(*types.ClientState) + suite.Require().True(ok) + + prefix = dymintChain.GetPrefix() + + // make packet acknowledgement proof + acknowledgementKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + proof, proofHeight = dymintChain.QueryProof(acknowledgementKey) + + // reset time and block delays to 0, malleate may change to a specific non-zero value. 
+ delayTimePeriod = 0 + delayBlockPeriod = 0 + tc.malleate() // make changes as necessary + + ctx := dymintCounterpartyChain.GetContext() + store := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, endpoint1.ClientID) + + err = clientState.VerifyPacketAcknowledgement( + ctx, store, dymintCounterpartyChain.Codec, proofHeight, delayTimePeriod, delayBlockPeriod, &prefix, proof, + packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ibcmock.MockAcknowledgement.Acknowledgement(), + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// test verification of the absent acknowledgement on chainB being represented +// in the light client on chainA. A send from chainB to chainA is simulated, but +// no receive. +func (suite *DymintTestSuite) TestVerifyPacketReceiptAbsence() { + var ( + clientState *types.ClientState + proof []byte + delayTimePeriod uint64 + delayBlockPeriod uint64 + proofHeight exported.Height + prefix commitmenttypes.MerklePrefix + dymintChain, dymintCounterpartyChain *ibctesting.TestChain + endpoint1, endpoint2 *ibctesting.Endpoint + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "successful verification", func() {}, true, + }, + { + name: "delay time period has passed", + malleate: func() { + delayTimePeriod = uint64(time.Second.Nanoseconds()) + }, + expPass: true, + }, + { + name: "delay time period has not passed", + malleate: func() { + delayTimePeriod = uint64(time.Hour.Nanoseconds()) + }, + expPass: false, + }, + { + name: "delay block period has passed", + malleate: func() { + delayBlockPeriod = 1 + }, + expPass: true, + }, + { + name: "delay block period has not passed", + malleate: func() { + delayBlockPeriod = 10 + }, + expPass: false, + }, + + { + "ApplyPrefix failed", func() { + prefix = commitmenttypes.MerklePrefix{} + }, false, + }, + { + "latest client height < height", func() { + proofHeight = clientState.LatestHeight.Increment() + }, false, + }, + { + "proof verification failed", func() { + proof = invalidProof + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + // setup testing conditions + path := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.Setup(path) + + if suite.chainB.TestChainClient.GetSelfClientType() == exported.Dymint { + dymintCounterpartyChain = suite.chainA + dymintChain = suite.chainB + endpoint1 = path.EndpointA + endpoint2 = path.EndpointB + } else { + dymintCounterpartyChain = suite.chainB + dymintChain = suite.chainA + endpoint1 = path.EndpointB + endpoint2 = path.EndpointA + } + + packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, endpoint1.ChannelConfig.PortID, endpoint1.ChannelID, endpoint2.ChannelConfig.PortID, endpoint2.ChannelID, clienttypes.NewHeight(0, 100), 0) + + // send packet, but no recv + err := endpoint1.SendPacket(packet) + suite.Require().NoError(err) + + var ok bool + clientStateI := dymintCounterpartyChain.GetClientState(endpoint1.ClientID) + clientState, ok = clientStateI.(*types.ClientState) + suite.Require().True(ok) + + prefix = dymintChain.GetPrefix() + + // make packet receipt absence proof + receiptKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + proof, proofHeight = endpoint2.QueryProof(receiptKey) + + // reset time and block delays to 0, malleate may change to a specific non-zero value. 
+ delayTimePeriod = 0 + delayBlockPeriod = 0 + tc.malleate() // make changes as necessary + + ctx := dymintCounterpartyChain.GetContext() + store := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, endpoint1.ClientID) + + err = clientState.VerifyPacketReceiptAbsence( + ctx, store, dymintCounterpartyChain.Codec, proofHeight, delayTimePeriod, delayBlockPeriod, &prefix, proof, + packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// test verification of the next receive sequence on chainB being represented +// in the light client on chainA. A send and receive from chainB to chainA is +// simulated. +func (suite *DymintTestSuite) TestVerifyNextSeqRecv() { + var ( + clientState *types.ClientState + proof []byte + delayTimePeriod uint64 + delayBlockPeriod uint64 + proofHeight exported.Height + prefix commitmenttypes.MerklePrefix + dymintChain, dymintCounterpartyChain *ibctesting.TestChain + endpoint1, endpoint2 *ibctesting.Endpoint + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "successful verification", func() {}, true, + }, + { + name: "delay time period has passed", + malleate: func() { + delayTimePeriod = uint64(time.Second.Nanoseconds()) + }, + expPass: true, + }, + { + name: "delay time period has not passed", + malleate: func() { + delayTimePeriod = uint64(time.Hour.Nanoseconds()) + }, + expPass: false, + }, + { + name: "delay block period has passed", + malleate: func() { + delayBlockPeriod = 1 + }, + expPass: true, + }, + { + name: "delay block period has not passed", + malleate: func() { + delayBlockPeriod = 10 + }, + expPass: false, + }, + + { + "ApplyPrefix failed", func() { + prefix = commitmenttypes.MerklePrefix{} + }, false, + }, + { + "latest client height < height", func() { + proofHeight = clientState.LatestHeight.Increment() + }, false, + }, + { + "proof verification failed", func() { + proof = invalidProof + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + // setup testing conditions + path := ibctesting.NewPath(suite.chainA, suite.chainB) + path.SetChannelOrdered() + suite.coordinator.Setup(path) + + if suite.chainB.TestChainClient.GetSelfClientType() == exported.Dymint { + dymintCounterpartyChain = suite.chainA + dymintChain = suite.chainB + endpoint1 = path.EndpointA + endpoint2 = path.EndpointB + } else { + dymintCounterpartyChain = suite.chainB + dymintChain = suite.chainA + endpoint1 = path.EndpointB + endpoint2 = path.EndpointA + } + + packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, endpoint1.ChannelConfig.PortID, endpoint1.ChannelID, endpoint2.ChannelConfig.PortID, endpoint2.ChannelID, clienttypes.NewHeight(0, 100), 0) + + // send packet + err := endpoint1.SendPacket(packet) + suite.Require().NoError(err) + + // next seq recv incremented + err = endpoint2.RecvPacket(packet) + suite.Require().NoError(err) + + var ok bool + clientStateI := dymintCounterpartyChain.GetClientState(endpoint1.ClientID) + clientState, ok = clientStateI.(*types.ClientState) + suite.Require().True(ok) + + prefix = dymintChain.GetPrefix() + + // make next seq recv proof + nextSeqRecvKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) + proof, proofHeight = dymintChain.QueryProof(nextSeqRecvKey) + + // reset time and block delays to 0, malleate may change to a specific non-zero 
value. + delayTimePeriod = 0 + delayBlockPeriod = 0 + tc.malleate() // make changes as necessary + + ctx := dymintCounterpartyChain.GetContext() + store := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, endpoint1.ClientID) + + err = clientState.VerifyNextSequenceRecv( + ctx, store, dymintCounterpartyChain.Codec, proofHeight, delayTimePeriod, delayBlockPeriod, &prefix, proof, + packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()+1, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} diff --git a/modules/light-clients/01-dymint/types/codec.go b/modules/light-clients/01-dymint/types/codec.go new file mode 100644 index 00000000000..8156431f505 --- /dev/null +++ b/modules/light-clients/01-dymint/types/codec.go @@ -0,0 +1,28 @@ +package types + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + + "github.com/cosmos/ibc-go/v5/modules/core/exported" +) + +// RegisterInterfaces registers the dymint concrete client-related +// implementations and interfaces. +func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + registry.RegisterImplementations( + (*exported.ClientState)(nil), + &ClientState{}, + ) + registry.RegisterImplementations( + (*exported.ConsensusState)(nil), + &ConsensusState{}, + ) + registry.RegisterImplementations( + (*exported.Header)(nil), + &Header{}, + ) + registry.RegisterImplementations( + (*exported.Misbehaviour)(nil), + &Misbehaviour{}, + ) +} diff --git a/modules/light-clients/01-dymint/types/consensus_state.go b/modules/light-clients/01-dymint/types/consensus_state.go new file mode 100644 index 00000000000..518c9c87675 --- /dev/null +++ b/modules/light-clients/01-dymint/types/consensus_state.go @@ -0,0 +1,53 @@ +package types + +import ( + "time" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + tmbytes "github.com/tendermint/tendermint/libs/bytes" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" +) + +// SentinelRoot is used as a stand-in root value for the consensus state set at the upgrade height +const SentinelRoot = "sentinel_root" + +// NewConsensusState creates a new ConsensusState instance. +func NewConsensusState( + timestamp time.Time, root commitmenttypes.MerkleRoot, nextValsHash tmbytes.HexBytes, +) *ConsensusState { + return &ConsensusState{ + Timestamp: timestamp, + Root: root, + } +} + +// ClientType returns Dymint +func (ConsensusState) ClientType() string { + return exported.Dymint +} + +// GetRoot returns the commitment Root for the specific +func (cs ConsensusState) GetRoot() exported.Root { + return cs.Root +} + +// GetTimestamp returns block time in nanoseconds of the header that created consensus state +func (cs ConsensusState) GetTimestamp() uint64 { + return uint64(cs.Timestamp.UnixNano()) +} + +// ValidateBasic defines a basic validation for the dymint consensus state. +// NOTE: ProcessedTimestamp may be zero if this is an initial consensus state passed in by relayer +// as opposed to a consensus state constructed by the chain. 
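A hedged usage sketch for the validation defined below: build a consensus state with a non-empty root and a real timestamp, then run ValidateBasic. The hash values are placeholders chosen for the example.

package main

import (
	"fmt"
	"time"

	commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types"
	dymint "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types"
)

func main() {
	cs := dymint.NewConsensusState(
		time.Now(),
		commitmenttypes.NewMerkleRoot([]byte("app_hash")), // commitment root (placeholder app hash)
		[]byte("next_vals_hash"),                          // next validators hash (placeholder)
	)

	fmt.Println(cs.ValidateBasic()) // <nil> for a well-formed state
}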
+func (cs ConsensusState) ValidateBasic() error { + if cs.Root.Empty() { + return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "root cannot be empty") + } + if cs.Timestamp.Unix() <= 0 { + return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "timestamp must be a positive Unix time") + } + return nil +} diff --git a/modules/light-clients/01-dymint/types/consensus_state_test.go b/modules/light-clients/01-dymint/types/consensus_state_test.go new file mode 100644 index 00000000000..bb7483d54d6 --- /dev/null +++ b/modules/light-clients/01-dymint/types/consensus_state_test.go @@ -0,0 +1,79 @@ +package types_test + +import ( + "time" + + commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" + "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" +) + +func (suite *DymintTestSuite) TestConsensusStateValidateBasic() { + testCases := []struct { + msg string + consensusState *types.ConsensusState + expectPass bool + }{ + { + "success", + &types.ConsensusState{ + Timestamp: suite.now, + Root: commitmenttypes.NewMerkleRoot([]byte("app_hash")), + NextValidatorsHash: suite.valsHash, + }, + true, + }, + { + "success with sentinel", + &types.ConsensusState{ + Timestamp: suite.now, + Root: commitmenttypes.NewMerkleRoot([]byte(types.SentinelRoot)), + NextValidatorsHash: suite.valsHash, + }, + true, + }, + { + "root is nil", + &types.ConsensusState{ + Timestamp: suite.now, + Root: commitmenttypes.MerkleRoot{}, + NextValidatorsHash: suite.valsHash, + }, + false, + }, + { + "root is empty", + &types.ConsensusState{ + Timestamp: suite.now, + Root: commitmenttypes.MerkleRoot{}, + NextValidatorsHash: suite.valsHash, + }, + false, + }, + + { + "timestamp is zero", + &types.ConsensusState{ + Timestamp: time.Time{}, + Root: commitmenttypes.NewMerkleRoot([]byte("app_hash")), + NextValidatorsHash: suite.valsHash, + }, + false, + }, + } + + for i, tc := range testCases { + tc := tc + + // check just to increase coverage + suite.Require().Equal(exported.Dymint, tc.consensusState.ClientType()) + suite.Require().Equal(tc.consensusState.GetRoot(), tc.consensusState.Root) + + err := tc.consensusState.ValidateBasic() + if tc.expectPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.msg) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.msg) + } + } +} diff --git a/modules/light-clients/01-dymint/types/dymint.pb.go b/modules/light-clients/01-dymint/types/dymint.pb.go new file mode 100644 index 00000000000..dc81b06dce1 --- /dev/null +++ b/modules/light-clients/01-dymint/types/dymint.pb.go @@ -0,0 +1,1840 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibc/lightclients/dymint/dymint.proto + +package types + +import ( + fmt "fmt" + _go "github.com/confio/ics23/go" + types "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + types1 "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + github_com_tendermint_tendermint_libs_bytes "github.com/tendermint/tendermint/libs/bytes" + types2 "github.com/tendermint/tendermint/proto/tendermint/types" + _ "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/protobuf/types/known/timestamppb" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ClientState from Dymint tracks the current validator set, latest height, +// and a possible frozen height. +type ClientState struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + TrustLevel Fraction `protobuf:"bytes,2,opt,name=trust_level,json=trustLevel,proto3" json:"trust_level" yaml:"trust_level"` + // duration of the period since the LastestTimestamp during which the + // submitted headers are valid for upgrade + TrustingPeriod time.Duration `protobuf:"bytes,3,opt,name=trusting_period,json=trustingPeriod,proto3,stdduration" json:"trusting_period" yaml:"trusting_period"` + // duration of the staking unbonding period + UnbondingPeriod time.Duration `protobuf:"bytes,4,opt,name=unbonding_period,json=unbondingPeriod,proto3,stdduration" json:"unbonding_period" yaml:"unbonding_period"` + // defines how much new (untrusted) header's Time can drift into the future. + MaxClockDrift time.Duration `protobuf:"bytes,5,opt,name=max_clock_drift,json=maxClockDrift,proto3,stdduration" json:"max_clock_drift" yaml:"max_clock_drift"` + // Block height when the client was frozen due to a misbehaviour + FrozenHeight types.Height `protobuf:"bytes,6,opt,name=frozen_height,json=frozenHeight,proto3" json:"frozen_height" yaml:"frozen_height"` + // Latest height the client was updated to + LatestHeight types.Height `protobuf:"bytes,7,opt,name=latest_height,json=latestHeight,proto3" json:"latest_height" yaml:"latest_height"` + // Proof specifications used in verifying counterparty state + ProofSpecs []*_go.ProofSpec `protobuf:"bytes,8,rep,name=proof_specs,json=proofSpecs,proto3" json:"proof_specs,omitempty" yaml:"proof_specs"` + // Path at which next upgraded client will be committed. + // Each element corresponds to the key for a single CommitmentProof in the + // chained proof. 
NOTE: ClientState must stored under + // `{upgradePath}/{upgradeHeight}/clientState` ConsensusState must be stored + // under `{upgradepath}/{upgradeHeight}/consensusState` For SDK chains using + // the default upgrade module, upgrade_path should be []string{"upgrade", + // "upgradedIBCState"}` + UpgradePath []string `protobuf:"bytes,9,rep,name=upgrade_path,json=upgradePath,proto3" json:"upgrade_path,omitempty" yaml:"upgrade_path"` +} + +func (m *ClientState) Reset() { *m = ClientState{} } +func (m *ClientState) String() string { return proto.CompactTextString(m) } +func (*ClientState) ProtoMessage() {} +func (*ClientState) Descriptor() ([]byte, []int) { + return fileDescriptor_cef6cb256dd4d990, []int{0} +} +func (m *ClientState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClientState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientState.Merge(m, src) +} +func (m *ClientState) XXX_Size() int { + return m.Size() +} +func (m *ClientState) XXX_DiscardUnknown() { + xxx_messageInfo_ClientState.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientState proto.InternalMessageInfo + +// ConsensusState defines the consensus state from Dymint. +type ConsensusState struct { + // timestamp that corresponds to the block height in which the ConsensusState + // was stored. + Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + // commitment root (i.e app hash) + Root types1.MerkleRoot `protobuf:"bytes,2,opt,name=root,proto3" json:"root"` + NextValidatorsHash github_com_tendermint_tendermint_libs_bytes.HexBytes `protobuf:"bytes,3,opt,name=next_validators_hash,json=nextValidatorsHash,proto3,casttype=github.com/tendermint/tendermint/libs/bytes.HexBytes" json:"next_validators_hash,omitempty" yaml:"next_validators_hash"` +} + +func (m *ConsensusState) Reset() { *m = ConsensusState{} } +func (m *ConsensusState) String() string { return proto.CompactTextString(m) } +func (*ConsensusState) ProtoMessage() {} +func (*ConsensusState) Descriptor() ([]byte, []int) { + return fileDescriptor_cef6cb256dd4d990, []int{1} +} +func (m *ConsensusState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusState.Merge(m, src) +} +func (m *ConsensusState) XXX_Size() int { + return m.Size() +} +func (m *ConsensusState) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusState.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusState proto.InternalMessageInfo + +// Misbehaviour is a wrapper over two conflicting Headers +// that implements Misbehaviour interface expected by ICS-02 +type Misbehaviour struct { + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + Header1 *Header `protobuf:"bytes,2,opt,name=header_1,json=header1,proto3" json:"header_1,omitempty" yaml:"header_1"` + Header2 *Header 
`protobuf:"bytes,3,opt,name=header_2,json=header2,proto3" json:"header_2,omitempty" yaml:"header_2"` +} + +func (m *Misbehaviour) Reset() { *m = Misbehaviour{} } +func (m *Misbehaviour) String() string { return proto.CompactTextString(m) } +func (*Misbehaviour) ProtoMessage() {} +func (*Misbehaviour) Descriptor() ([]byte, []int) { + return fileDescriptor_cef6cb256dd4d990, []int{2} +} +func (m *Misbehaviour) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Misbehaviour) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Misbehaviour.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Misbehaviour) XXX_Merge(src proto.Message) { + xxx_messageInfo_Misbehaviour.Merge(m, src) +} +func (m *Misbehaviour) XXX_Size() int { + return m.Size() +} +func (m *Misbehaviour) XXX_DiscardUnknown() { + xxx_messageInfo_Misbehaviour.DiscardUnknown(m) +} + +var xxx_messageInfo_Misbehaviour proto.InternalMessageInfo + +// Header defines the Dymint client consensus Header. +// It encapsulates all the information necessary to update from a trusted +// Dymint ConsensusState. The inclusion of TrustedHeight and +// TrustedValidators allows this update to process correctly, so long as the +// ConsensusState for the TrustedHeight exists, this removes race conditions +// among relayers The SignedHeader and ValidatorSet are the new untrusted update +// fields for the client. The TrustedHeight is the height of a stored +// ConsensusState on the client that will be used to verify the new untrusted +// header. The Trusted ConsensusState must be within the unbonding period of +// current time in order to correctly verify, and the TrustedValidators must +// hash to TrustedConsensusState.NextValidatorsHash since that is the last +// trusted validator set at the TrustedHeight. 
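The Header type defined below carries both the new (untrusted) signed header and the trusted reference data the comment above describes. A hypothetical helper, not part of this patch, showing how the four fields fit together:

package example

import (
	clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types"
	dymint "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

// newUpdateHeader assembles a client update from a new signed header plus the
// trusted height and validators the relayer chose from an existing ConsensusState.
func newUpdateHeader(sh *tmproto.SignedHeader, vals, trustedVals *tmproto.ValidatorSet, trustedHeight clienttypes.Height) *dymint.Header {
	return &dymint.Header{
		SignedHeader:      sh,            // new, untrusted header and commit
		ValidatorSet:      vals,          // validator set that signed it
		TrustedHeight:     trustedHeight, // height of a ConsensusState already stored on the client
		TrustedValidators: trustedVals,   // must hash to that state's NextValidatorsHash
	}
}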
+type Header struct { + *types2.SignedHeader `protobuf:"bytes,1,opt,name=signed_header,json=signedHeader,proto3,embedded=signed_header" json:"signed_header,omitempty" yaml:"signed_header"` + ValidatorSet *types2.ValidatorSet `protobuf:"bytes,2,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty" yaml:"validator_set"` + TrustedHeight types.Height `protobuf:"bytes,3,opt,name=trusted_height,json=trustedHeight,proto3" json:"trusted_height" yaml:"trusted_height"` + TrustedValidators *types2.ValidatorSet `protobuf:"bytes,4,opt,name=trusted_validators,json=trustedValidators,proto3" json:"trusted_validators,omitempty" yaml:"trusted_validators"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_cef6cb256dd4d990, []int{3} +} +func (m *Header) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(m, src) +} +func (m *Header) XXX_Size() int { + return m.Size() +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_Header proto.InternalMessageInfo + +func (m *Header) GetValidatorSet() *types2.ValidatorSet { + if m != nil { + return m.ValidatorSet + } + return nil +} + +func (m *Header) GetTrustedHeight() types.Height { + if m != nil { + return m.TrustedHeight + } + return types.Height{} +} + +func (m *Header) GetTrustedValidators() *types2.ValidatorSet { + if m != nil { + return m.TrustedValidators + } + return nil +} + +// Fraction defines the protobuf message type for tmmath.Fraction that only +// supports positive values. 
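Fraction defined below is the value carried in ClientState.TrustLevel. As an illustrative assumption of how it would typically be populated, a one-third trust level looks like this:

package example

import dymint "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types"

// oneThird expresses a 1/3 trust level as a positive fraction.
var oneThird = dymint.Fraction{Numerator: 1, Denominator: 3}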
+type Fraction struct { + Numerator uint64 `protobuf:"varint,1,opt,name=numerator,proto3" json:"numerator,omitempty"` + Denominator uint64 `protobuf:"varint,2,opt,name=denominator,proto3" json:"denominator,omitempty"` +} + +func (m *Fraction) Reset() { *m = Fraction{} } +func (m *Fraction) String() string { return proto.CompactTextString(m) } +func (*Fraction) ProtoMessage() {} +func (*Fraction) Descriptor() ([]byte, []int) { + return fileDescriptor_cef6cb256dd4d990, []int{4} +} +func (m *Fraction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Fraction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Fraction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Fraction) XXX_Merge(src proto.Message) { + xxx_messageInfo_Fraction.Merge(m, src) +} +func (m *Fraction) XXX_Size() int { + return m.Size() +} +func (m *Fraction) XXX_DiscardUnknown() { + xxx_messageInfo_Fraction.DiscardUnknown(m) +} + +var xxx_messageInfo_Fraction proto.InternalMessageInfo + +func (m *Fraction) GetNumerator() uint64 { + if m != nil { + return m.Numerator + } + return 0 +} + +func (m *Fraction) GetDenominator() uint64 { + if m != nil { + return m.Denominator + } + return 0 +} + +func init() { + proto.RegisterType((*ClientState)(nil), "ibc.lightclients.dymint.ClientState") + proto.RegisterType((*ConsensusState)(nil), "ibc.lightclients.dymint.ConsensusState") + proto.RegisterType((*Misbehaviour)(nil), "ibc.lightclients.dymint.Misbehaviour") + proto.RegisterType((*Header)(nil), "ibc.lightclients.dymint.Header") + proto.RegisterType((*Fraction)(nil), "ibc.lightclients.dymint.Fraction") +} + +func init() { + proto.RegisterFile("ibc/lightclients/dymint/dymint.proto", fileDescriptor_cef6cb256dd4d990) +} + +var fileDescriptor_cef6cb256dd4d990 = []byte{ + // 1002 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0x8f, 0x9b, 0xd0, 0x26, 0x93, 0x74, 0x5b, 0x4c, 0xd9, 0xa6, 0xa5, 0x1b, 0x07, 0x83, 0x44, + 0x2f, 0xb5, 0x49, 0xca, 0xa9, 0xe2, 0x82, 0xbb, 0x42, 0x2d, 0x62, 0xa5, 0xca, 0x45, 0x20, 0x2d, + 0x02, 0xe3, 0x3f, 0x13, 0x7b, 0xb4, 0xb6, 0xc7, 0xf2, 0x8c, 0xa3, 0x96, 0x4f, 0x00, 0x07, 0xa4, + 0x3d, 0x22, 0x4e, 0x1c, 0xf8, 0x30, 0x7b, 0xec, 0x91, 0x93, 0x41, 0xed, 0x37, 0xc8, 0x91, 0x0b, + 0x2b, 0xcf, 0x8c, 0xff, 0xa4, 0xbb, 0x55, 0xd5, 0x4b, 0x32, 0xef, 0xbd, 0xdf, 0xfb, 0xfd, 0x3c, + 0x6f, 0xe6, 0x3d, 0x1b, 0x7c, 0x8c, 0x1c, 0x57, 0x0f, 0x91, 0x1f, 0x50, 0x37, 0x44, 0x30, 0xa6, + 0x44, 0xf7, 0x2e, 0x23, 0x14, 0x53, 0xf1, 0xa7, 0x25, 0x29, 0xa6, 0x58, 0xde, 0x46, 0x8e, 0xab, + 0x35, 0x51, 0x1a, 0x0f, 0xef, 0x8e, 0x29, 0x8c, 0x3d, 0x98, 0xb2, 0x0c, 0x7a, 0x99, 0x40, 0xa2, + 0xcf, 0xed, 0x10, 0x79, 0x36, 0xc5, 0x29, 0x4f, 0xdd, 0xdd, 0x7b, 0x03, 0xc1, 0x7e, 0x45, 0x74, + 0x90, 0xa4, 0x18, 0xcf, 0x4a, 0x6b, 0xe4, 0x63, 0xec, 0x87, 0x50, 0x67, 0x96, 0x93, 0xcd, 0x74, + 0x2f, 0x4b, 0x6d, 0x8a, 0x70, 0x2c, 0xe2, 0xca, 0xed, 0x38, 0x45, 0x11, 0x24, 0xd4, 0x8e, 0x92, + 0x12, 0x50, 0xec, 0xc6, 0xc5, 0x29, 0xd4, 0xf9, 0x73, 0xea, 0xf3, 0x89, 0x58, 0x09, 0xc0, 0x27, + 0x35, 0x00, 0x47, 0x11, 0xa2, 0x51, 0x09, 0xaa, 0x2c, 0x01, 0xdc, 0xf2, 0xb1, 0x8f, 0xd9, 0x52, + 0x2f, 0x56, 0xdc, 0xab, 0xfe, 0xb6, 0x0a, 0xfa, 0xc7, 0x8c, 0xef, 0x9c, 0xda, 0x14, 0xca, 0x3b, + 0xa0, 0xeb, 0x06, 0x36, 0x8a, 0x2d, 0xe4, 0x0d, 0xa5, 0xb1, 0xb4, 0xdf, 0x33, 0xd7, 
0x98, 0x7d, + 0xea, 0xc9, 0x3f, 0x82, 0x3e, 0x4d, 0x33, 0x42, 0xad, 0x10, 0xce, 0x61, 0x38, 0x5c, 0x19, 0x4b, + 0xfb, 0xfd, 0xe9, 0x87, 0xda, 0x1d, 0x85, 0xd4, 0xbe, 0x4c, 0x6d, 0xb7, 0xd8, 0xa9, 0xb1, 0xfb, + 0x2a, 0x57, 0x5a, 0x8b, 0x5c, 0x91, 0x2f, 0xed, 0x28, 0x3c, 0x52, 0x1b, 0x1c, 0xaa, 0x09, 0x98, + 0xf5, 0x75, 0x61, 0xc8, 0x33, 0xb0, 0xc1, 0x2c, 0x14, 0xfb, 0x56, 0x02, 0x53, 0x84, 0xbd, 0x61, + 0x9b, 0x69, 0xec, 0x68, 0xbc, 0x4a, 0x5a, 0x59, 0x25, 0xed, 0xa9, 0xa8, 0xa2, 0xa1, 0x0a, 0xee, + 0xc7, 0x0d, 0xee, 0x3a, 0x5f, 0xfd, 0xfd, 0x1f, 0x45, 0x32, 0x1f, 0x95, 0xde, 0x33, 0xe6, 0x94, + 0x11, 0xd8, 0xcc, 0x62, 0x07, 0xc7, 0x5e, 0x43, 0xa8, 0x73, 0x9f, 0xd0, 0x47, 0x42, 0x68, 0x9b, + 0x0b, 0xdd, 0x26, 0xe0, 0x4a, 0x1b, 0x95, 0x5b, 0x48, 0x41, 0xb0, 0x11, 0xd9, 0x17, 0x96, 0x1b, + 0x62, 0xf7, 0x85, 0xe5, 0xa5, 0x68, 0x46, 0x87, 0xef, 0x3c, 0x70, 0x4b, 0xb7, 0xf2, 0xb9, 0xd0, + 0x7a, 0x64, 0x5f, 0x1c, 0x17, 0xce, 0xa7, 0x85, 0x4f, 0xfe, 0x01, 0xac, 0xcf, 0x52, 0xfc, 0x33, + 0x8c, 0xad, 0x00, 0x16, 0x27, 0x31, 0x5c, 0x65, 0x22, 0xbb, 0xec, 0x6c, 0x8a, 0xbb, 0xa1, 0x89, + 0x2b, 0x33, 0x9f, 0x68, 0x27, 0x0c, 0x61, 0xec, 0x09, 0x95, 0x2d, 0xae, 0xb2, 0x94, 0xae, 0x9a, + 0x03, 0x6e, 0x73, 0x6c, 0x41, 0x1f, 0xda, 0x14, 0x12, 0x5a, 0xd2, 0xaf, 0x3d, 0x94, 0x7e, 0x29, + 0x5d, 0x35, 0x07, 0xdc, 0x16, 0xf4, 0xa7, 0xa0, 0xcf, 0x7a, 0xc6, 0x22, 0x09, 0x74, 0xc9, 0xb0, + 0x3b, 0x6e, 0xef, 0xf7, 0xa7, 0x9b, 0x1a, 0x72, 0xc9, 0xf4, 0x50, 0x3b, 0x2b, 0x22, 0xe7, 0x09, + 0x74, 0x8d, 0xc7, 0xf5, 0x15, 0x6a, 0xc0, 0x55, 0x13, 0x24, 0x25, 0x84, 0xc8, 0x47, 0x60, 0x90, + 0x25, 0x7e, 0x6a, 0x7b, 0xd0, 0x4a, 0x6c, 0x1a, 0x0c, 0x7b, 0xe3, 0xf6, 0x7e, 0xcf, 0xd8, 0x5e, + 0xe4, 0xca, 0x7b, 0xe2, 0xdc, 0x1a, 0x51, 0xd5, 0xec, 0x0b, 0xf3, 0xcc, 0xa6, 0xc1, 0x51, 0xe7, + 0x97, 0x3f, 0x95, 0x96, 0xfa, 0xd7, 0x0a, 0x78, 0x74, 0x8c, 0x63, 0x02, 0x63, 0x92, 0x11, 0xde, + 0x12, 0x06, 0xe8, 0x55, 0x5d, 0xc9, 0x7a, 0xa2, 0xd8, 0xfa, 0xed, 0xe3, 0xfb, 0xa6, 0x44, 0x18, + 0xdd, 0x62, 0xeb, 0x2f, 0x8b, 0x53, 0xaa, 0xd3, 0xe4, 0xcf, 0x41, 0x27, 0xc5, 0x98, 0x8a, 0xa6, + 0x51, 0x1b, 0x95, 0xab, 0xdb, 0x74, 0x3e, 0xd1, 0x9e, 0xc1, 0xf4, 0x45, 0x08, 0x4d, 0x8c, 0xa9, + 0xd1, 0x29, 0x68, 0x4c, 0x96, 0x25, 0xff, 0x2a, 0x81, 0xad, 0x18, 0x5e, 0x50, 0xab, 0x1a, 0x45, + 0xc4, 0x0a, 0x6c, 0x12, 0xb0, 0xfe, 0x18, 0x18, 0xdf, 0x2d, 0x72, 0xe5, 0x03, 0xbe, 0xbf, 0xb7, + 0xa1, 0xd4, 0xff, 0x72, 0xe5, 0x33, 0x1f, 0xd1, 0x20, 0x73, 0x0a, 0x39, 0xbd, 0x39, 0xbe, 0xea, + 0x65, 0x88, 0x1c, 0xa2, 0x3b, 0x97, 0x14, 0x12, 0xed, 0x04, 0x5e, 0x18, 0xc5, 0xc2, 0x94, 0x0b, + 0xba, 0x6f, 0x2b, 0xb6, 0x13, 0x9b, 0x94, 0x65, 0xfa, 0x5f, 0x02, 0x83, 0x67, 0x88, 0x38, 0x30, + 0xb0, 0xe7, 0x08, 0x67, 0xa9, 0x3c, 0x01, 0x3d, 0x7e, 0x09, 0xaa, 0xc1, 0x61, 0x6c, 0x2d, 0x72, + 0x65, 0x93, 0x3f, 0x56, 0x15, 0x52, 0xcd, 0x2e, 0x5f, 0x9f, 0x7a, 0xf2, 0x73, 0xd0, 0x0d, 0xa0, + 0xed, 0xc1, 0xd4, 0x9a, 0x88, 0xba, 0x28, 0x77, 0x0e, 0x93, 0x13, 0x06, 0x34, 0x46, 0xd7, 0xb9, + 0xb2, 0xc6, 0xd7, 0x93, 0x45, 0xae, 0x6c, 0x70, 0xf6, 0x92, 0x45, 0x35, 0xd7, 0xf8, 0x72, 0xd2, + 0xe0, 0x9e, 0x8a, 0x21, 0xf2, 0x10, 0xee, 0xe9, 0x1b, 0xdc, 0xd3, 0x8a, 0x7b, 0x2a, 0x2a, 0xf0, + 0x47, 0x1b, 0xac, 0x72, 0xb4, 0x6c, 0x83, 0x75, 0x82, 0xfc, 0x18, 0x7a, 0x16, 0x87, 0x88, 0x4b, + 0x32, 0xd2, 0xea, 0xf2, 0x6a, 0xfc, 0x15, 0x71, 0xce, 0x60, 0x42, 0x70, 0xef, 0x2a, 0x57, 0xa4, + 0xba, 0x47, 0x96, 0x28, 0x54, 0x73, 0x40, 0x1a, 0xd8, 0xa2, 0x05, 0xab, 0x53, 0xb5, 0x08, 0x2c, + 0x2f, 0xd2, 0x5b, 0x24, 0xaa, 0xe3, 0x3a, 0x87, 0xd4, 0x18, 0xd6, 0xf4, 0x4b, 0xe9, 0xaa, 0x39, + 0x98, 0x37, 
0x70, 0xf2, 0x4f, 0x80, 0x0f, 0x49, 0xa6, 0xcf, 0x5a, 0xbc, 0x7d, 0x6f, 0x8b, 0x3f, + 0x11, 0x2d, 0xfe, 0x7e, 0x63, 0xf4, 0x56, 0xf9, 0xaa, 0xb9, 0x2e, 0x1c, 0xa2, 0xc9, 0x43, 0x20, + 0x97, 0x88, 0xfa, 0x7a, 0x8a, 0xb1, 0x7b, 0xdf, 0x2e, 0x9e, 0x2c, 0x72, 0x65, 0x67, 0x59, 0xa5, + 0xe6, 0x50, 0xcd, 0x77, 0x85, 0xb3, 0xbe, 0xa8, 0xea, 0x57, 0xa0, 0x5b, 0xbe, 0x7e, 0xe4, 0x3d, + 0xd0, 0x8b, 0xb3, 0x08, 0xa6, 0x45, 0x84, 0x9d, 0x4c, 0xc7, 0xac, 0x1d, 0xf2, 0x18, 0xf4, 0x3d, + 0x18, 0xe3, 0x08, 0xc5, 0x2c, 0xbe, 0xc2, 0xe2, 0x4d, 0x97, 0xf1, 0xfd, 0xab, 0xeb, 0x91, 0x74, + 0x75, 0x3d, 0x92, 0xfe, 0xbd, 0x1e, 0x49, 0x2f, 0x6f, 0x46, 0xad, 0xab, 0x9b, 0x51, 0xeb, 0xef, + 0x9b, 0x51, 0xeb, 0xf9, 0x17, 0x8d, 0xa6, 0x72, 0x31, 0x89, 0x30, 0xd1, 0x91, 0xe3, 0x1e, 0xf8, + 0x58, 0x9f, 0x1f, 0xea, 0x11, 0xf6, 0xb2, 0x10, 0x12, 0xfe, 0x25, 0x72, 0x50, 0x7e, 0x8a, 0x7c, + 0x3a, 0x39, 0x10, 0x5f, 0x23, 0x6c, 0x9f, 0xce, 0x2a, 0x1b, 0x20, 0x87, 0xaf, 0x03, 0x00, 0x00, + 0xff, 0xff, 0xe7, 0x69, 0x57, 0xd0, 0xb5, 0x08, 0x00, 0x00, +} + +func (m *ClientState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.UpgradePath) > 0 { + for iNdEx := len(m.UpgradePath) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UpgradePath[iNdEx]) + copy(dAtA[i:], m.UpgradePath[iNdEx]) + i = encodeVarintDymint(dAtA, i, uint64(len(m.UpgradePath[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if len(m.ProofSpecs) > 0 { + for iNdEx := len(m.ProofSpecs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ProofSpecs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDymint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + { + size, err := m.LatestHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDymint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.FrozenHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDymint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + n3, err3 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxClockDrift, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxClockDrift):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintDymint(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0x2a + n4, err4 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.UnbondingPeriod, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.UnbondingPeriod):]) + if err4 != nil { + return 0, err4 + } + i -= n4 + i = encodeVarintDymint(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x22 + n5, err5 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.TrustingPeriod, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.TrustingPeriod):]) + if err5 != nil { + return 0, err5 + } + i -= n5 + i = encodeVarintDymint(dAtA, i, uint64(n5)) + i-- + dAtA[i] = 0x1a + { + size, err := m.TrustLevel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDymint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = 
encodeVarintDymint(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConsensusState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintDymint(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Root.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDymint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err8 != nil { + return 0, err8 + } + i -= n8 + i = encodeVarintDymint(dAtA, i, uint64(n8)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Misbehaviour) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Misbehaviour) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Misbehaviour) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Header2 != nil { + { + size, err := m.Header2.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDymint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Header1 != nil { + { + size, err := m.Header1.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDymint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintDymint(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Header) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Header) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TrustedValidators != nil { + { + size, err := m.TrustedValidators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDymint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.TrustedHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDymint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDymint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.SignedHeader != nil { + { + size, err := 
m.SignedHeader.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDymint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Fraction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Fraction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Fraction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Denominator != 0 { + i = encodeVarintDymint(dAtA, i, uint64(m.Denominator)) + i-- + dAtA[i] = 0x10 + } + if m.Numerator != 0 { + i = encodeVarintDymint(dAtA, i, uint64(m.Numerator)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintDymint(dAtA []byte, offset int, v uint64) int { + offset -= sovDymint(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClientState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovDymint(uint64(l)) + } + l = m.TrustLevel.Size() + n += 1 + l + sovDymint(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.TrustingPeriod) + n += 1 + l + sovDymint(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.UnbondingPeriod) + n += 1 + l + sovDymint(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxClockDrift) + n += 1 + l + sovDymint(uint64(l)) + l = m.FrozenHeight.Size() + n += 1 + l + sovDymint(uint64(l)) + l = m.LatestHeight.Size() + n += 1 + l + sovDymint(uint64(l)) + if len(m.ProofSpecs) > 0 { + for _, e := range m.ProofSpecs { + l = e.Size() + n += 1 + l + sovDymint(uint64(l)) + } + } + if len(m.UpgradePath) > 0 { + for _, s := range m.UpgradePath { + l = len(s) + n += 1 + l + sovDymint(uint64(l)) + } + } + return n +} + +func (m *ConsensusState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovDymint(uint64(l)) + l = m.Root.Size() + n += 1 + l + sovDymint(uint64(l)) + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovDymint(uint64(l)) + } + return n +} + +func (m *Misbehaviour) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovDymint(uint64(l)) + } + if m.Header1 != nil { + l = m.Header1.Size() + n += 1 + l + sovDymint(uint64(l)) + } + if m.Header2 != nil { + l = m.Header2.Size() + n += 1 + l + sovDymint(uint64(l)) + } + return n +} + +func (m *Header) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignedHeader != nil { + l = m.SignedHeader.Size() + n += 1 + l + sovDymint(uint64(l)) + } + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovDymint(uint64(l)) + } + l = m.TrustedHeight.Size() + n += 1 + l + sovDymint(uint64(l)) + if m.TrustedValidators != nil { + l = m.TrustedValidators.Size() + n += 1 + l + sovDymint(uint64(l)) + } + return n +} + +func (m *Fraction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Numerator != 0 { + n += 1 + sovDymint(uint64(m.Numerator)) + } + if m.Denominator != 0 { + n += 1 + sovDymint(uint64(m.Denominator)) + } + return n +} + +func sovDymint(x uint64) (n int) { + 
return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDymint(x uint64) (n int) { + return sovDymint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ClientState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustLevel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TrustLevel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustingPeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.TrustingPeriod, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnbondingPeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.UnbondingPeriod, 
dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxClockDrift", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.MaxClockDrift, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrozenHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FrozenHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LatestHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofSpecs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofSpecs = append(m.ProofSpecs, &_go.ProofSpec{}) + if err := m.ProofSpecs[len(m.ProofSpecs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpgradePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpgradePath = 
append(m.UpgradePath, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDymint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDymint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Root.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDymint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDymint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Misbehaviour) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Misbehaviour: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Misbehaviour: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header1", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header1 == nil { + m.Header1 = &Header{} + } + if err := m.Header1.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header2", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header2 == nil { + m.Header2 = &Header{} + } + if err := m.Header2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDymint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDymint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF 
+ } + return nil +} +func (m *Header) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Header: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SignedHeader == nil { + m.SignedHeader = &types2.SignedHeader{} + } + if err := m.SignedHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorSet == nil { + m.ValidatorSet = &types2.ValidatorSet{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustedHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TrustedHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustedValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDymint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDymint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TrustedValidators == nil { + m.TrustedValidators = &types2.ValidatorSet{} + } + if err := 
m.TrustedValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDymint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDymint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Fraction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Fraction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Fraction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Numerator", wireType) + } + m.Numerator = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Numerator |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denominator", wireType) + } + m.Denominator = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDymint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Denominator |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDymint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDymint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDymint(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDymint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDymint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDymint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDymint + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDymint + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDymint + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, 
io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDymint = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDymint = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDymint = fmt.Errorf("proto: unexpected end of group") +) diff --git a/modules/light-clients/01-dymint/types/dymint_test.go b/modules/light-clients/01-dymint/types/dymint_test.go new file mode 100644 index 00000000000..84b7d4947a4 --- /dev/null +++ b/modules/light-clients/01-dymint/types/dymint_test.go @@ -0,0 +1,155 @@ +package types_test + +import ( + "testing" + "time" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/suite" + tmbytes "github.com/tendermint/tendermint/libs/bytes" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmtypes "github.com/tendermint/tendermint/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" + ibctmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" + ibctesting "github.com/cosmos/ibc-go/v5/testing" + ibctestingmock "github.com/cosmos/ibc-go/v5/testing/mock" + "github.com/cosmos/ibc-go/v5/testing/simapp" +) + +const ( + chainID = "gaia" + chainIDRevision0 = "gaia-revision-0" + chainIDRevision1 = "gaia-revision-1" + clientID = "gaiamainnet" + trustingPeriod time.Duration = time.Hour * 24 * 7 * 2 + ubdPeriod time.Duration = time.Hour * 24 * 7 * 3 + maxClockDrift time.Duration = time.Second * 10 +) + +var ( + height = clienttypes.NewHeight(0, 4) + newClientHeight = clienttypes.NewHeight(1, 1) + upgradePath = []string{"upgrade", "upgradedIBCState"} +) + +type DymintTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain + + // TODO: deprecate usage in favor of testing package + ctx sdk.Context + cdc codec.Codec + privVal tmtypes.PrivValidator + valSet *tmtypes.ValidatorSet + signers map[string]tmtypes.PrivValidator + valsHash tmbytes.HexBytes + header *ibctmtypes.Header + now time.Time + headerTime time.Time + clientTime time.Time + + // consensus setup + chainAConsensusType string + chainBConsensusType string +} + +func (suite *DymintTestSuite) SetupTest() { + // suite.SetupTestWithConsensusType(exported.Dymint, exported.Tendermint) + suite.SetupTestWithConsensusType(suite.chainAConsensusType, suite.chainBConsensusType) +} + +func (suite *DymintTestSuite) SetupTestWithConsensusType(chainAConsensusType string, chainBConsensusType string) { + // FIXME + suite.Require().True(chainAConsensusType == exported.Dymint || chainBConsensusType == exported.Dymint) + suite.Require().True(chainAConsensusType == exported.Dymint || chainAConsensusType == exported.Tendermint) + suite.Require().True(chainBConsensusType == exported.Dymint || chainBConsensusType == exported.Tendermint) + + suite.coordinator = ibctesting.NewCoordinatorWithConsensusType(suite.T(), []string{chainAConsensusType, chainBConsensusType}) + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) + // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1) + suite.coordinator.CommitNBlocks(suite.chainA, 2) + suite.coordinator.CommitNBlocks(suite.chainB, 2) + + // TODO: deprecate usage in favor of testing package + checkTx := false + app := 
simapp.Setup(checkTx) + + suite.cdc = app.AppCodec() + + // now is the time of the current chain, must be after the updating header + // mocks ctx.BlockTime() + suite.now = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC) + suite.clientTime = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) + // Header time is intended to be time for any new header used for updates + suite.headerTime = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC) + + suite.privVal = ibctestingmock.NewPV() + + pubKey, err := suite.privVal.GetPubKey() + suite.Require().NoError(err) + + heightMinus1 := clienttypes.NewHeight(0, height.RevisionHeight-1) + + val := tmtypes.NewValidator(pubKey, 10) + suite.signers = make(map[string]tmtypes.PrivValidator) + suite.signers[val.Address.String()] = suite.privVal + + suite.valSet = tmtypes.NewValidatorSet([]*tmtypes.Validator{val}) + suite.valsHash = suite.valSet.Hash() + if chainAConsensusType == exported.Tendermint { + chainBDymint := suite.chainB.TestChainClient.(*ibctesting.TestChainDymint) + suite.header = chainBDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, suite.signers) + } else { + // chainA must be Dymint + chainADymint := suite.chainA.TestChainClient.(*ibctesting.TestChainDymint) + suite.header = chainADymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, suite.signers) + } + + suite.ctx = app.BaseApp.NewContext(checkTx, tmproto.Header{Height: 1, Time: suite.now}) +} + +func getSuiteSigners(suite *DymintTestSuite) []tmtypes.PrivValidator { + return []tmtypes.PrivValidator{suite.privVal} +} + +func getBothSigners(suite *DymintTestSuite, altVal *tmtypes.Validator, altPrivVal tmtypes.PrivValidator) (*tmtypes.ValidatorSet, map[string]tmtypes.PrivValidator) { + // Create bothValSet with both suite validator and altVal. 
Would be valid update + bothValSet := tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal)) + // Create signer array and ensure it is in same order as bothValSet + _, suiteVal := suite.valSet.GetByIndex(0) + bothSigners := map[string]tmtypes.PrivValidator{ + suiteVal.Address.String(): suite.privVal, + altVal.Address.String(): altPrivVal, + } + return bothValSet, bothSigners +} + +func TestDymintTestSuiteDymTm(t *testing.T) { + suite.Run(t, &DymintTestSuite{ + chainAConsensusType: exported.Dymint, + chainBConsensusType: exported.Tendermint, + }) +} + +func TestDymintTestSuiteTmDym(t *testing.T) { + suite.Run(t, &DymintTestSuite{ + chainAConsensusType: exported.Tendermint, + chainBConsensusType: exported.Dymint, + }) +} + +func TestDymintTestSuiteDymDym(t *testing.T) { + suite.Run(t, &DymintTestSuite{ + chainAConsensusType: exported.Dymint, + chainBConsensusType: exported.Dymint, + }) +} diff --git a/modules/light-clients/01-dymint/types/errors.go b/modules/light-clients/01-dymint/types/errors.go new file mode 100644 index 00000000000..e99dde63404 --- /dev/null +++ b/modules/light-clients/01-dymint/types/errors.go @@ -0,0 +1,24 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const ( + SubModuleName = "dymint-client" +) + +// IBC dymint client sentinel errors +var ( + ErrInvalidChainID = sdkerrors.Register(SubModuleName, 2, "invalid chain-id") + ErrInvalidTrustingPeriod = sdkerrors.Register(SubModuleName, 3, "invalid trusting period") + ErrInvalidHeaderHeight = sdkerrors.Register(SubModuleName, 4, "invalid header height") + ErrInvalidHeader = sdkerrors.Register(SubModuleName, 5, "invalid header") + ErrInvalidMaxClockDrift = sdkerrors.Register(SubModuleName, 6, "invalid max clock drift") + ErrProcessedTimeNotFound = sdkerrors.Register(SubModuleName, 7, "processed time not found") + ErrProcessedHeightNotFound = sdkerrors.Register(SubModuleName, 8, "processed height not found") + ErrDelayPeriodNotPassed = sdkerrors.Register(SubModuleName, 9, "packet-specified delay period has not been reached") + ErrTrustingPeriodExpired = sdkerrors.Register(SubModuleName, 10, "time since latest trusted state has passed the trusting period") + ErrInvalidProofSpecs = sdkerrors.Register(SubModuleName, 12, "invalid proof specs") + ErrInvalidValidatorSet = sdkerrors.Register(SubModuleName, 13, "invalid validator set") +) diff --git a/modules/light-clients/01-dymint/types/genesis.go b/modules/light-clients/01-dymint/types/genesis.go new file mode 100644 index 00000000000..8f9a8731a80 --- /dev/null +++ b/modules/light-clients/01-dymint/types/genesis.go @@ -0,0 +1,22 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" +) + +// ExportMetadata exports all the consensus metadata in the client store so they can be included in clients genesis +// and imported by a ClientKeeper +func (cs ClientState) ExportMetadata(store sdk.KVStore) []exported.GenesisMetadata { + gm := make([]exported.GenesisMetadata, 0) + IterateConsensusMetadata(store, func(key, val []byte) bool { + gm = append(gm, clienttypes.NewGenesisMetadata(key, val)) + return false + }) + if len(gm) == 0 { + return nil + } + return gm +} diff --git a/modules/light-clients/01-dymint/types/genesis_test.go b/modules/light-clients/01-dymint/types/genesis_test.go new file mode 100644 index 00000000000..1466ab34b4b --- /dev/null +++ 
b/modules/light-clients/01-dymint/types/genesis_test.go @@ -0,0 +1,89 @@ +package types_test + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" + ibctesting "github.com/cosmos/ibc-go/v5/testing" +) + +// expected export ordering: +// processed height and processed time per height +// then all iteration keys +func (suite *DymintTestSuite) TestExportMetadata() { + // test initializing client and exporting metadata + path := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.SetupClients(path) + clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + clientState := path.EndpointA.GetClientState() + height := clientState.GetLatestHeight() + + initIteration := types.GetIterationKey(clientStore, height) + suite.Require().NotEqual(0, len(initIteration)) + initProcessedTime, found := types.GetProcessedTime(clientStore, height) + suite.Require().True(found) + initProcessedHeight, found := types.GetProcessedHeight(clientStore, height) + suite.Require().True(found) + + gm := clientState.ExportMetadata(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)) + suite.Require().NotNil(gm, "client with metadata returned nil exported metadata") + suite.Require().Len(gm, 3, "exported metadata has unexpected length") + + suite.Require().Equal(types.ProcessedHeightKey(height), gm[0].GetKey(), "metadata has unexpected key") + actualProcessedHeight, err := clienttypes.ParseHeight(string(gm[0].GetValue())) + suite.Require().NoError(err) + suite.Require().Equal(initProcessedHeight, actualProcessedHeight, "metadata has unexpected value") + + suite.Require().Equal(types.ProcessedTimeKey(height), gm[1].GetKey(), "metadata has unexpected key") + suite.Require().Equal(initProcessedTime, sdk.BigEndianToUint64(gm[1].GetValue()), "metadata has unexpected value") + + suite.Require().Equal(types.IterationKey(height), gm[2].GetKey(), "metadata has unexpected key") + suite.Require().Equal(initIteration, gm[2].GetValue(), "metadata has unexpected value") + + // test updating client and exporting metadata + err = path.EndpointA.UpdateClient() + suite.Require().NoError(err) + + clientState = path.EndpointA.GetClientState() + updateHeight := clientState.GetLatestHeight() + + iteration := types.GetIterationKey(clientStore, updateHeight) + suite.Require().NotEqual(0, len(iteration)) + processedTime, found := types.GetProcessedTime(clientStore, updateHeight) + suite.Require().True(found) + processedHeight, found := types.GetProcessedHeight(clientStore, updateHeight) + suite.Require().True(found) + + gm = clientState.ExportMetadata(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)) + suite.Require().NotNil(gm, "client with metadata returned nil exported metadata") + suite.Require().Len(gm, 6, "exported metadata has unexpected length") + + // expected ordering: + // initProcessedHeight, initProcessedTime, processedHeight, processedTime, initIteration, iteration + + // check init processed height and time + suite.Require().Equal(types.ProcessedHeightKey(height), gm[0].GetKey(), "metadata has unexpected key") + actualProcessedHeight, err = clienttypes.ParseHeight(string(gm[0].GetValue())) + suite.Require().NoError(err) + suite.Require().Equal(initProcessedHeight, actualProcessedHeight, 
"metadata has unexpected value") + + suite.Require().Equal(types.ProcessedTimeKey(height), gm[1].GetKey(), "metadata has unexpected key") + suite.Require().Equal(initProcessedTime, sdk.BigEndianToUint64(gm[1].GetValue()), "metadata has unexpected value") + + // check processed height and time after update + suite.Require().Equal(types.ProcessedHeightKey(updateHeight), gm[2].GetKey(), "metadata has unexpected key") + actualProcessedHeight, err = clienttypes.ParseHeight(string(gm[2].GetValue())) + suite.Require().NoError(err) + suite.Require().Equal(processedHeight, actualProcessedHeight, "metadata has unexpected value") + + suite.Require().Equal(types.ProcessedTimeKey(updateHeight), gm[3].GetKey(), "metadata has unexpected key") + suite.Require().Equal(processedTime, sdk.BigEndianToUint64(gm[3].GetValue()), "metadata has unexpected value") + + // check iteration keys + suite.Require().Equal(types.IterationKey(height), gm[4].GetKey(), "metadata has unexpected key") + suite.Require().Equal(initIteration, gm[4].GetValue(), "metadata has unexpected value") + + suite.Require().Equal(types.IterationKey(updateHeight), gm[5].GetKey(), "metadata has unexpected key") + suite.Require().Equal(iteration, gm[5].GetValue(), "metadata has unexpected value") +} diff --git a/modules/light-clients/01-dymint/types/header.go b/modules/light-clients/01-dymint/types/header.go new file mode 100644 index 00000000000..bda7c4934b4 --- /dev/null +++ b/modules/light-clients/01-dymint/types/header.go @@ -0,0 +1,137 @@ +package types + +import ( + "bytes" + fmt "fmt" + "time" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + tmtypes "github.com/tendermint/tendermint/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" +) + +var _ exported.Header = &Header{} + +// ConsensusState returns the updated consensus state associated with the header +func (h Header) ConsensusState() *ConsensusState { + return &ConsensusState{ + Timestamp: h.GetTime(), + Root: commitmenttypes.NewMerkleRoot(h.Header.GetAppHash()), + } +} + +// ClientType defines that the Header is a Dymint rollapp +func (h Header) ClientType() string { + return exported.Dymint +} + +// GetChainID returns the chain-id +func (h Header) GetChainID() string { + return h.Header.ChainID +} + +// GetHeight returns the current height. It returns 0 if the dymint +// header is nil. +// NOTE: the header.Header is checked to be non nil in ValidateBasic. +func (h Header) GetHeight() exported.Height { + revision := clienttypes.ParseChainID(h.Header.ChainID) + return clienttypes.NewHeight(revision, uint64(h.Header.Height)) +} + +// GetTime returns the current block timestamp. It returns a zero time if +// the dymint header is nil. +// NOTE: the header.Header is checked to be non nil in ValidateBasic. +func (h Header) GetTime() time.Time { + return h.Header.Time +} + +// ValidateBasic calls the SignedHeader ValidateBasic function and checks +// that validatorsets are not nil. 
+// NOTE: TrustedHeight may be empty when creating client +// with MsgCreateClient +func (h Header) ValidateBasic() error { + if h.SignedHeader == nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "dymint signed header cannot be nil") + } + if h.Header == nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "dymint header cannot be nil") + } + tmSignedHeader, err := tmtypes.SignedHeaderFromProto(h.SignedHeader) + if err != nil { + return sdkerrors.Wrap(err, "header is not a dymint header") + } + if err := tmSignedHeader.ValidateBasic(h.Header.GetChainID()); err != nil { + return sdkerrors.Wrap(err, "header failed basic validation") + } + + // TrustedHeight is less than Header for updates and misbehaviour + if h.TrustedHeight.GTE(h.GetHeight()) { + return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "TrustedHeight %d must be less than header height %d", + h.TrustedHeight, h.GetHeight()) + } + + if h.ValidatorSet == nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "validator set is nil") + } + tmValset, err := tmtypes.ValidatorSetFromProto(h.ValidatorSet) + if err != nil { + return sdkerrors.Wrap(err, "validator set is not dymint validator set") + } + if !bytes.Equal(h.Header.ValidatorsHash, tmValset.Hash()) { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "validator set does not match hash") + } + return nil +} + +// ValidateCommit checks if the given commit is a valid commit from the passed-in validatorset +func (h Header) ValidateCommit() (err error) { + blockID, err := tmtypes.BlockIDFromProto(&h.SignedHeader.Commit.BlockID) + if err != nil { + return sdkerrors.Wrap(err, "invalid block ID from header SignedHeader.Commit") + } + tmCommit, err := tmtypes.CommitFromProto(h.Commit) + if err != nil { + return sdkerrors.Wrap(err, "commit is not dymint commit type") + } + tmValset, err := tmtypes.ValidatorSetFromProto(h.ValidatorSet) + if err != nil { + return sdkerrors.Wrap(err, "validator set is not dymint validator set type") + } + + if tmValset.Size() != len(tmCommit.Signatures) { + return tmtypes.NewErrInvalidCommitSignatures(tmValset.Size(), len(tmCommit.Signatures)) + } + + if !blockID.Equals(tmCommit.BlockID) { + return fmt.Errorf("invalid commit -- wrong block ID: want %v, got %v", + blockID, tmCommit.BlockID) + } + + // We don't know the validators that committed this block, so we have to + // check for each vote if its validator is already known. + valIdx, val := tmValset.GetByAddress(h.Header.ProposerAddress) + if val != nil { + commitSig := tmCommit.Signatures[valIdx] + if !commitSig.ForBlock() { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "validator set did not commit to header") + } + // Validate signature. 
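+		// the commit signature at the proposer's index must have been cast by the proposer address itself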
+ if !bytes.Equal(commitSig.ValidatorAddress, h.Header.ProposerAddress) { + return fmt.Errorf("wrong proposer address in commit, got %X but expected %X", commitSig.ValidatorAddress, h.Header.ProposerAddress) + } + headerBytes, err := h.SignedHeader.Header.Marshal() + if err != nil { + return err + } + if !val.PubKey.VerifySignature(headerBytes, commitSig.Signature) { + return fmt.Errorf("wrong signature (#%d): %X", valIdx, commitSig.Signature) + } + } else { + return fmt.Errorf("proposer is not in the validator set (proposer: %x)", h.Header.ProposerAddress) + } + + return nil +} diff --git a/modules/light-clients/01-dymint/types/header_test.go b/modules/light-clients/01-dymint/types/header_test.go new file mode 100644 index 00000000000..fbf98f34493 --- /dev/null +++ b/modules/light-clients/01-dymint/types/header_test.go @@ -0,0 +1,110 @@ +package types_test + +import ( + "time" + + tmprotocrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" + tmprototypes "github.com/tendermint/tendermint/proto/tendermint/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" + "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" + ibctesting "github.com/cosmos/ibc-go/v5/testing" +) + +func (suite *DymintTestSuite) TestGetHeight() { + if suite.chainA.TestChainClient.GetSelfClientType() == exported.Dymint { + header := suite.chainA.TestChainClient.(*ibctesting.TestChainDymint).LastHeader + suite.Require().NotEqual(uint64(0), header.GetHeight()) + } else { + // chainB must be Dymint + header := suite.chainB.TestChainClient.(*ibctesting.TestChainDymint).LastHeader + suite.Require().NotEqual(uint64(0), header.GetHeight()) + } +} + +func (suite *DymintTestSuite) TestGetTime() { + if suite.chainA.TestChainClient.GetSelfClientType() == exported.Dymint { + header := suite.chainA.TestChainClient.(*ibctesting.TestChainDymint).LastHeader + suite.Require().NotEqual(time.Time{}, header.GetTime()) + } else { + // chainB must be Dymint + header := suite.chainB.TestChainClient.(*ibctesting.TestChainDymint).LastHeader + suite.Require().NotEqual(time.Time{}, header.GetTime()) + } +} + +func (suite *DymintTestSuite) TestHeaderValidateBasic() { + var ( + header *types.Header + dymintChain *ibctesting.TestChainDymint + ) + testCases := []struct { + name string + malleate func() + expPass bool + }{ + {"valid header", func() {}, true}, + {"header is nil", func() { + header.Header = nil + }, false}, + {"signed header is nil", func() { + header.SignedHeader = nil + }, false}, + {"SignedHeaderFromProto failed", func() { + header.SignedHeader.Commit.Height = -1 + }, false}, + {"signed header failed dymint ValidateBasic", func() { + header = dymintChain.LastHeader + header.SignedHeader.Commit = nil + }, false}, + {"trusted height is equal to header height", func() { + header.TrustedHeight = header.GetHeight().(clienttypes.Height) + }, false}, + {"validator set nil", func() { + header.ValidatorSet = nil + }, false}, + {"ValidatorSetFromProto failed", func() { + header.ValidatorSet.Validators[0].PubKey = tmprotocrypto.PublicKey{} + }, false}, + {"header validator hash does not equal hash of validator set", func() { + // generated new validator set + val := tmprototypes.Validator{} + valSet := tmprototypes.ValidatorSet{ + Validators: []*tmprototypes.Validator{&val}, + Proposer: &val, + TotalVotingPower: 0, + } + header.ValidatorSet = &valSet + }, false}, + } + + suite.Require().Equal(exported.Dymint, suite.header.ClientType()) + + for _, tc := 
range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + + if suite.chainA.TestChainClient.GetSelfClientType() == exported.Dymint { + dymintChain = suite.chainA.TestChainClient.(*ibctesting.TestChainDymint) + } else { + // chainB must be Dymint + dymintChain = suite.chainB.TestChainClient.(*ibctesting.TestChainDymint) + } + + header = dymintChain.LastHeader // must be explicitly changed in malleate + + tc.malleate() + + err := header.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} diff --git a/modules/light-clients/01-dymint/types/misbehaviour.go b/modules/light-clients/01-dymint/types/misbehaviour.go new file mode 100644 index 00000000000..1132f07a51d --- /dev/null +++ b/modules/light-clients/01-dymint/types/misbehaviour.go @@ -0,0 +1,101 @@ +package types + +import ( + "time" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + host "github.com/cosmos/ibc-go/v5/modules/core/24-host" + "github.com/cosmos/ibc-go/v5/modules/core/exported" +) + +var _ exported.Misbehaviour = &Misbehaviour{} + +// FrozenHeight - Use the same FrozenHeight for all misbehaviour +var FrozenHeight = clienttypes.NewHeight(0, 1) + +// NewMisbehaviour creates a new Misbehaviour instance. +func NewMisbehaviour(clientID string, header1, header2 *Header) *Misbehaviour { + return &Misbehaviour{ + ClientId: clientID, + Header1: header1, + Header2: header2, + } +} + +// ClientType is Dymint light client +func (misbehaviour Misbehaviour) ClientType() string { + return exported.Dymint +} + +// GetChainID returns the chain-id +func (misbehaviour Misbehaviour) GetChainID() string { + // assuming Header1.ChainID and Header2.ChainID are the same, as checked in ValidateBasic + return misbehaviour.Header1.GetChainID() +} + +// GetClientID returns the ID of the client that committed a misbehaviour. +func (misbehaviour Misbehaviour) GetClientID() string { + return misbehaviour.ClientId +} + +// GetTime returns the timestamp at which misbehaviour occurred. It uses the +// maximum value from both headers to prevent producing an invalid header outside +// of the misbehaviour age range. 
+func (misbehaviour Misbehaviour) GetTime() time.Time { + t1, t2 := misbehaviour.Header1.GetTime(), misbehaviour.Header2.GetTime() + if t1.After(t2) { + return t1 + } + return t2 +} + +// ValidateBasic implements Misbehaviour interface +func (misbehaviour Misbehaviour) ValidateBasic() error { + if misbehaviour.Header1 == nil { + return sdkerrors.Wrap(ErrInvalidHeader, "misbehaviour Header1 cannot be nil") + } + if misbehaviour.Header2 == nil { + return sdkerrors.Wrap(ErrInvalidHeader, "misbehaviour Header2 cannot be nil") + } + if misbehaviour.Header1.TrustedHeight.RevisionHeight == 0 { + return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "misbehaviour Header1 cannot have zero revision height") + } + if misbehaviour.Header2.TrustedHeight.RevisionHeight == 0 { + return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "misbehaviour Header2 cannot have zero revision height") + } + if misbehaviour.Header1.Header.ChainID != misbehaviour.Header2.Header.ChainID { + return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "headers must have identical chainIDs") + } + + if err := host.ClientIdentifierValidator(misbehaviour.ClientId); err != nil { + return sdkerrors.Wrap(err, "misbehaviour client ID is invalid") + } + + // ValidateBasic on both headers + if err := misbehaviour.Header1.ValidateBasic(); err != nil { + return sdkerrors.Wrap( + clienttypes.ErrInvalidMisbehaviour, + sdkerrors.Wrap(err, "header 1 failed validation").Error(), + ) + } + if err := misbehaviour.Header2.ValidateBasic(); err != nil { + return sdkerrors.Wrap( + clienttypes.ErrInvalidMisbehaviour, + sdkerrors.Wrap(err, "header 2 failed validation").Error(), + ) + } + // Ensure that Height1 is greater than or equal to Height2 + if misbehaviour.Header1.GetHeight().LT(misbehaviour.Header2.GetHeight()) { + return sdkerrors.Wrapf(clienttypes.ErrInvalidMisbehaviour, "Header1 height is less than Header2 height (%s < %s)", misbehaviour.Header1.GetHeight(), misbehaviour.Header2.GetHeight()) + } + + if err := misbehaviour.Header1.ValidateCommit(); err != nil { + return err + } + if err := misbehaviour.Header2.ValidateCommit(); err != nil { + return err + } + return nil +} diff --git a/modules/light-clients/01-dymint/types/misbehaviour_handle.go b/modules/light-clients/01-dymint/types/misbehaviour_handle.go new file mode 100644 index 00000000000..0e94bf20dd8 --- /dev/null +++ b/modules/light-clients/01-dymint/types/misbehaviour_handle.go @@ -0,0 +1,129 @@ +package types + +import ( + "bytes" + "strings" + "time" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + tmtypes "github.com/tendermint/tendermint/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" +) + +// CheckMisbehaviourAndUpdateState determines whether or not two conflicting +// headers at the same height would have convinced the light client. +// +// NOTE: consensusState1 is the trusted consensus state that corresponds to the TrustedHeight +// of misbehaviour.Header1 +// Similarly, consensusState2 is the trusted consensus state that corresponds +// to misbehaviour.Header2 +// Misbehaviour sets frozen height to {0, 1} since it is only used as a boolean value (zero or non-zero). 
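+// If both headers verify against their trusted consensus states, the returned client state has FrozenHeight set, freezing the client.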
+func (cs ClientState) CheckMisbehaviourAndUpdateState( + ctx sdk.Context, + cdc codec.BinaryCodec, + clientStore sdk.KVStore, + misbehaviour exported.Misbehaviour, +) (exported.ClientState, error) { + tmMisbehaviour, ok := misbehaviour.(*Misbehaviour) + if !ok { + return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClientType, "expected type %T, got %T", misbehaviour, &Misbehaviour{}) + } + + // The status of the client is checked in 02-client + + // if heights are equal check that this is valid misbehaviour of a fork + // otherwise if heights are unequal check that this is valid misbehavior of BFT time violation + if tmMisbehaviour.Header1.GetHeight().EQ(tmMisbehaviour.Header2.GetHeight()) { + blockID1, err := tmtypes.BlockIDFromProto(&tmMisbehaviour.Header1.SignedHeader.Commit.BlockID) + if err != nil { + return nil, sdkerrors.Wrap(err, "invalid block ID from header 1 in misbehaviour") + } + blockID2, err := tmtypes.BlockIDFromProto(&tmMisbehaviour.Header2.SignedHeader.Commit.BlockID) + if err != nil { + return nil, sdkerrors.Wrap(err, "invalid block ID from header 2 in misbehaviour") + } + + // Ensure that Commit Hashes are different + if bytes.Equal(blockID1.Hash, blockID2.Hash) { + return nil, sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "headers block hashes are equal") + } + // Header1 is at greater height than Header2, therefore Header1 time must be less than or equal to + // Header2 time in order to be valid misbehaviour (violation of monotonic time). + } else if tmMisbehaviour.Header1.SignedHeader.Header.Time.After(tmMisbehaviour.Header2.SignedHeader.Header.Time) { + return nil, sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "headers are not at same height and are monotonically increasing") + } + + // Regardless of the type of misbehaviour, ensure that both headers are valid and would have been accepted by light-client + + // Retrieve trusted consensus states for each Header in misbehaviour + // and unmarshal from clientStore + + // Get consensus bytes from clientStore + tmConsensusState1, err := GetConsensusState(clientStore, cdc, tmMisbehaviour.Header1.TrustedHeight) + if err != nil { + return nil, sdkerrors.Wrapf(err, "could not get trusted consensus state from clientStore for Header1 at TrustedHeight: %s", tmMisbehaviour.Header1) + } + + // Get consensus bytes from clientStore + tmConsensusState2, err := GetConsensusState(clientStore, cdc, tmMisbehaviour.Header2.TrustedHeight) + if err != nil { + return nil, sdkerrors.Wrapf(err, "could not get trusted consensus state from clientStore for Header2 at TrustedHeight: %s", tmMisbehaviour.Header2) + } + + // Check the validity of the two conflicting headers against their respective + // trusted consensus states + // NOTE: header height and commitment root assertions are checked in + // misbehaviour.ValidateBasic by the client keeper and msg.ValidateBasic + // by the base application. 
+ if err := checkMisbehaviourHeader( + &cs, tmConsensusState1, tmMisbehaviour.Header1, ctx.BlockTime(), + ); err != nil { + return nil, sdkerrors.Wrap(err, "verifying Header1 in Misbehaviour failed") + } + if err := checkMisbehaviourHeader( + &cs, tmConsensusState2, tmMisbehaviour.Header2, ctx.BlockTime(), + ); err != nil { + return nil, sdkerrors.Wrap(err, "verifying Header2 in Misbehaviour failed") + } + + cs.FrozenHeight = FrozenHeight + + return &cs, nil +} + +// checkMisbehaviourHeader checks that a Header in Misbehaviour is valid misbehaviour given +// a trusted ConsensusState +func checkMisbehaviourHeader( + clientState *ClientState, consState *ConsensusState, header *Header, currentTimestamp time.Time, +) error { + _, err := tmtypes.CommitFromProto(header.Commit) + if err != nil { + return sdkerrors.Wrap(err, "commit is not dymint commit type") + } + + // assert that the age of the trusted consensus state is not older than the trusting period + if currentTimestamp.Sub(consState.Timestamp) >= clientState.TrustingPeriod { + return sdkerrors.Wrapf( + ErrTrustingPeriodExpired, + "current timestamp minus the latest consensus state timestamp is greater than or equal to the trusting period (%d >= %d)", + currentTimestamp.Sub(consState.Timestamp), clientState.TrustingPeriod, + ) + } + + // swap out revision if exists and get chainID + clientStateChainID := strings.Split(clientState.GetChainID(), "-")[0] + headerChainID := strings.Split(header.Header.ChainID, "-")[0] + + if headerChainID != clientStateChainID { + return sdkerrors.Wrapf( + ErrInvalidChainID, + "expected %s{chainID}, got %s", clientStateChainID, headerChainID, + ) + } + + return nil +} diff --git a/modules/light-clients/01-dymint/types/misbehaviour_handle_test.go b/modules/light-clients/01-dymint/types/misbehaviour_handle_test.go new file mode 100644 index 00000000000..0318aab82d4 --- /dev/null +++ b/modules/light-clients/01-dymint/types/misbehaviour_handle_test.go @@ -0,0 +1,364 @@ +package types_test + +import ( + "fmt" + "time" + + "github.com/tendermint/tendermint/crypto/tmhash" + tmtypes "github.com/tendermint/tendermint/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" + "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" + ibctesting "github.com/cosmos/ibc-go/v5/testing" + ibctestingmock "github.com/cosmos/ibc-go/v5/testing/mock" +) + +func (suite *DymintTestSuite) TestCheckMisbehaviourAndUpdateState() { + var chainDymint *ibctesting.TestChainDymint + altPrivVal := ibctestingmock.NewPV() + altPubKey, err := altPrivVal.GetPubKey() + suite.Require().NoError(err) + + altVal := tmtypes.NewValidator(altPubKey, 4) + + // Create bothValSet with both suite validator and altVal + bothValSet := tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal)) + bothValsHash := bothValSet.Hash() + + _, suiteVal := suite.valSet.GetByIndex(0) + + // Create signer array and ensure it is in same order as bothValSet + bothSigners := ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal) + + heightMinus1 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-1) + heightMinus3 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-3) + + if suite.chainA.TestChainClient.GetSelfClientType() == exported.Dymint { + chainDymint = suite.chainA.TestChainClient.(*ibctesting.TestChainDymint) + } else { + 
chainDymint = suite.chainB.TestChainClient.(*ibctesting.TestChainDymint) + } + + testCases := []struct { + name string + clientState exported.ClientState + consensusState1 exported.ConsensusState + height1 clienttypes.Height + consensusState2 exported.ConsensusState + height2 clienttypes.Height + misbehaviour exported.Misbehaviour + timestamp time.Time + expPass bool + }{ + { + "valid fork misbehaviour", + types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners), + Header2: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "valid time misbehaviour", + types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+3), height, suite.now, bothValSet, bothValSet, bothSigners), + Header2: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "valid time misbehaviour header 1 stricly less than header 2", + types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+3), height, suite.now, bothValSet, bothValSet, bothSigners), + Header2: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Hour), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "valid misbehavior at height greater than last consensusState", + types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + &types.Misbehaviour{ + Header1: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + 
{ + "valid misbehaviour with different trusted heights", + types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash), + heightMinus3, + &types.Misbehaviour{ + Header1: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "valid misbehaviour at a previous revision", + types.NewClientState(chainIDRevision1, trustingPeriod, maxClockDrift, clienttypes.NewHeight(1, 1), commitmenttypes.GetSDKSpecs(), upgradePath), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash), + heightMinus3, + &types.Misbehaviour{ + Header1: chainDymint.CreateDMClientHeader(chainIDRevision0, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: chainDymint.CreateDMClientHeader(chainIDRevision0, int64(height.RevisionHeight+1), heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "valid misbehaviour at a future revision", + types.NewClientState(chainIDRevision0, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash), + heightMinus3, + &types.Misbehaviour{ + Header1: chainDymint.CreateDMClientHeader(chainIDRevision0, 3, heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: chainDymint.CreateDMClientHeader(chainIDRevision0, 3, heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "valid misbehaviour with trusted heights at a previous revision", + types.NewClientState(chainIDRevision1, trustingPeriod, maxClockDrift, clienttypes.NewHeight(1, 1), commitmenttypes.GetSDKSpecs(), upgradePath), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash), + heightMinus3, + &types.Misbehaviour{ + Header1: chainDymint.CreateDMClientHeader(chainIDRevision1, 1, heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: chainDymint.CreateDMClientHeader(chainIDRevision1, 1, heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "consensus state's valset hash different from misbehaviour should still pass", + types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + types.NewConsensusState(suite.now, 
commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
+			height,
+			types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
+			height,
+			&types.Misbehaviour{
+				Header1:  chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, suite.valSet, bothSigners),
+				Header2:  chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
+				ClientId: chainID,
+			},
+			suite.now,
+			true,
+		},
+		{
+			"invalid fork misbehaviour: identical headers",
+			types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath),
+			types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+			height,
+			types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+			height,
+			&types.Misbehaviour{
+				Header1:  chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners),
+				Header2:  chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners),
+				ClientId: chainID,
+			},
+			suite.now,
+			false,
+		},
+		{
+			"invalid time misbehaviour: monotonically increasing time",
+			types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath),
+			types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+			height,
+			types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+			height,
+			&types.Misbehaviour{
+				Header1:  chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+3), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+				Header2:  chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners),
+				ClientId: chainID,
+			},
+			suite.now,
+			false,
+		},
+		{
+			"invalid misbehaviour from different chain",
+			types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath),
+			types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+			height,
+			types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+			height,
+			&types.Misbehaviour{
+				Header1:  chainDymint.CreateDMClientHeader("ethermint", int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners),
+				Header2:  chainDymint.CreateDMClientHeader("ethermint", int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+				ClientId: chainID,
+			},
+			suite.now,
+			false,
+		},
+		{
+			"invalid misbehaviour with trusted height different from trusted consensus state",
+			types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath),
+			types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+			heightMinus1,
+			types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
+			heightMinus3,
+			&types.Misbehaviour{
+				Header1: 
chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + { + "already frozen client state", + &types.ClientState{FrozenHeight: clienttypes.NewHeight(0, 1)}, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners), + Header2: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + { + "trusted consensus state does not exist", + types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + nil, // consensus state for trusted height - 1 does not exist in store + clienttypes.Height{}, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + { + "invalid dymint misbehaviour", + types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + nil, + suite.now, + false, + }, + { + "provided height > header height", + types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + { + "trusting period expired", + types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath), + types.NewConsensusState(time.Time{}, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: 
chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: chainDymint.CreateDMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now.Add(trustingPeriod), + false, + }, + } + + for i, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case: %s", tc.name), func() { + // reset suite to create fresh application state + suite.SetupTest() + + if suite.chainA.TestChainClient.GetSelfClientType() == exported.Dymint { + chainDymint = suite.chainA.TestChainClient.(*ibctesting.TestChainDymint) + } else { + chainDymint = suite.chainB.TestChainClient.(*ibctesting.TestChainDymint) + } + + // Set current timestamp in context + ctx := chainDymint.TC.GetContext().WithBlockTime(tc.timestamp) + + // Set trusted consensus states in client store + + if tc.consensusState1 != nil { + chainDymint.TC.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, tc.height1, tc.consensusState1) + } + if tc.consensusState2 != nil { + chainDymint.TC.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, tc.height2, tc.consensusState2) + } + + clientState, err := tc.clientState.CheckMisbehaviourAndUpdateState( + ctx, + chainDymint.TC.App.AppCodec(), + chainDymint.TC.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, clientID), // pass in clientID prefixed clientStore + tc.misbehaviour, + ) + + if tc.expPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + suite.Require().NotNil(clientState, "valid test case %d failed: %s", i, tc.name) + suite.Require().True(!clientState.(*types.ClientState).FrozenHeight.IsZero(), "valid test case %d failed: %s", i, tc.name) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + suite.Require().Nil(clientState, "invalid test case %d passed: %s", i, tc.name) + } + }) + } +} diff --git a/modules/light-clients/01-dymint/types/misbehaviour_test.go b/modules/light-clients/01-dymint/types/misbehaviour_test.go new file mode 100644 index 00000000000..b9ecbff8463 --- /dev/null +++ b/modules/light-clients/01-dymint/types/misbehaviour_test.go @@ -0,0 +1,260 @@ +package types_test + +import ( + "time" + + "github.com/tendermint/tendermint/crypto/tmhash" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmtypes "github.com/tendermint/tendermint/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" + "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" + ibctesting "github.com/cosmos/ibc-go/v5/testing" + ibctestingmock "github.com/cosmos/ibc-go/v5/testing/mock" +) + +func (suite *DymintTestSuite) TestMisbehaviour() { + var dymintChain *ibctesting.TestChainDymint + signers := make(map[string]tmtypes.PrivValidator) + signers[val.Address.String()] = privVal + heightMinus1 := clienttypes.NewHeight(0, height.RevisionHeight-1) + + if suite.chainA.TestChainClient.GetSelfClientType() == exported.Dymint { + dymintChain = suite.chainA.TestChainClient.(*ibctesting.TestChainDymint) + } else { + dymintChain = suite.chainB.TestChainClient.(*ibctesting.TestChainDymint) + } + + misbehaviour := &types.Misbehaviour{ + Header1: suite.header, + Header2: dymintChain.CreateDMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, signers), + ClientId: clientID, + } + 
+ suite.Require().Equal(exported.Dymint, misbehaviour.ClientType()) + suite.Require().Equal(clientID, misbehaviour.GetClientID()) +} + +func (suite *DymintTestSuite) TestMisbehaviourValidateBasic() { + var dymintChain *ibctesting.TestChainDymint + altPrivVal := ibctestingmock.NewPV() + altPubKey, err := altPrivVal.GetPubKey() + suite.Require().NoError(err) + + revisionHeight := int64(height.RevisionHeight) + + altVal := tmtypes.NewValidator(altPubKey, revisionHeight) + + // Create bothValSet with both suite validator and altVal + bothValSet := tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal)) + // Create alternative validator set with only altVal + altValSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{altVal}) + + signers := make(map[string]tmtypes.PrivValidator) + signers[altVal.Address.String()] = altPrivVal + + // Create signer array and ensure it is in same order as bothValSet + _, suiteVal := suite.valSet.GetByIndex(0) + bothSigners := ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal) + + altSigners := make(map[string]tmtypes.PrivValidator, 1) + altSigners[altVal.Address.String()] = altPrivVal + + heightMinus1 := clienttypes.NewHeight(0, height.RevisionHeight-1) + + if suite.chainA.TestChainClient.GetSelfClientType() == exported.Dymint { + dymintChain = suite.chainA.TestChainClient.(*ibctesting.TestChainDymint) + } else { + dymintChain = suite.chainB.TestChainClient.(*ibctesting.TestChainDymint) + } + + testCases := []struct { + name string + misbehaviour *types.Misbehaviour + malleateMisbehaviour func(misbehaviour *types.Misbehaviour) error + expPass bool + }{ + { + "valid fork misbehaviour, two headers at same height have different time", + &types.Misbehaviour{ + Header1: suite.header, + Header2: dymintChain.CreateDMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + true, + }, + { + "valid time misbehaviour, both headers at different heights are at same time", + &types.Misbehaviour{ + Header1: dymintChain.CreateDMClientHeader(chainID, int64(height.RevisionHeight+5), heightMinus1, suite.now, suite.valSet, suite.valSet, signers), + Header2: suite.header, + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + true, + }, + { + "misbehaviour Header1 is nil", + types.NewMisbehaviour(clientID, nil, suite.header), + func(m *types.Misbehaviour) error { return nil }, + false, + }, + { + "misbehaviour Header2 is nil", + types.NewMisbehaviour(clientID, suite.header, nil), + func(m *types.Misbehaviour) error { return nil }, + false, + }, + { + "valid misbehaviour with different trusted headers", + &types.Misbehaviour{ + Header1: suite.header, + Header2: dymintChain.CreateDMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.NewHeight(0, height.RevisionHeight-3), suite.now.Add(time.Minute), suite.valSet, bothValSet, signers), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + true, + }, + { + "trusted height is 0 in Header1", + &types.Misbehaviour{ + Header1: dymintChain.CreateDMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.ZeroHeight(), suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers), + Header2: suite.header, + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + false, + }, + { + "trusted height is 0 in Header2", + 
&types.Misbehaviour{ + Header1: suite.header, + Header2: dymintChain.CreateDMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.ZeroHeight(), suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + false, + }, + // { + // "trusted valset is nil in Header1", + // &types.Misbehaviour{ + // Header1: dymintChain.CreateDMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), suite.valSet, nil, signers), + // Header2: suite.header, + // ClientId: clientID, + // }, + // func(misbehaviour *types.Misbehaviour) error { return nil }, + // false, + // }, + // { + // "trusted valset is nil in Header2", + // &types.Misbehaviour{ + // Header1: suite.header, + // Header2: dymintChain.CreateDMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), suite.valSet, nil, signers), + // ClientId: clientID, + // }, + // func(misbehaviour *types.Misbehaviour) error { return nil }, + // false, + // }, + { + "invalid client ID ", + &types.Misbehaviour{ + Header1: suite.header, + Header2: dymintChain.CreateDMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, signers), + ClientId: "GAIA", + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + false, + }, + { + "chainIDs do not match", + &types.Misbehaviour{ + Header1: suite.header, + Header2: dymintChain.CreateDMClientHeader("ethermint", int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, signers), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + false, + }, + { + "header2 height is greater", + &types.Misbehaviour{ + Header1: suite.header, + Header2: dymintChain.CreateDMClientHeader(chainID, 6, clienttypes.NewHeight(0, height.RevisionHeight+1), suite.now, suite.valSet, suite.valSet, signers), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + false, + }, + { + "header 1 doesn't have 2/3 majority", + &types.Misbehaviour{ + Header1: dymintChain.CreateDMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, suite.valSet, bothSigners), + Header2: suite.header, + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { + // voteSet contains only altVal which is less than 2/3 of total power (height/1height) + wrongVoteSet := tmtypes.NewVoteSet(chainID, int64(misbehaviour.Header1.GetHeight().GetRevisionHeight()), 1, tmproto.PrecommitType, altValSet) + blockID, err := tmtypes.BlockIDFromProto(&misbehaviour.Header1.Commit.BlockID) + if err != nil { + return err + } + + tmCommit, err := tmtypes.MakeCommit(*blockID, int64(misbehaviour.Header2.GetHeight().GetRevisionHeight()), misbehaviour.Header1.Commit.Round, wrongVoteSet, altSigners, suite.now) + misbehaviour.Header1.Commit = tmCommit.ToProto() + return err + }, + false, + }, + { + "header 2 doesn't have 2/3 majority", + &types.Misbehaviour{ + Header1: suite.header, + Header2: dymintChain.CreateDMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, suite.valSet, bothSigners), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { + // voteSet contains only altVal which is less than 2/3 of total power (height/1height) + wrongVoteSet := tmtypes.NewVoteSet(chainID, int64(misbehaviour.Header2.GetHeight().GetRevisionHeight()), 1, tmproto.PrecommitType, 
altValSet)
+				blockID, err := tmtypes.BlockIDFromProto(&misbehaviour.Header2.Commit.BlockID)
+				if err != nil {
+					return err
+				}
+
+				tmCommit, err := tmtypes.MakeCommit(*blockID, int64(misbehaviour.Header2.GetHeight().GetRevisionHeight()), misbehaviour.Header2.Commit.Round, wrongVoteSet, altSigners, suite.now)
+				misbehaviour.Header2.Commit = tmCommit.ToProto()
+				return err
+			},
+			false,
+		},
+		{
+			"validators sign off on wrong commit",
+			&types.Misbehaviour{
+				Header1:  suite.header,
+				Header2:  dymintChain.CreateDMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, suite.valSet, bothSigners),
+				ClientId: clientID,
+			},
+			func(misbehaviour *types.Misbehaviour) error {
+				tmBlockID := ibctesting.MakeBlockID(tmhash.Sum([]byte("other_hash")), 3, tmhash.Sum([]byte("other_partset")))
+				misbehaviour.Header2.Commit.BlockID = tmBlockID.ToProto()
+				return nil
+			},
+			false,
+		},
+	}
+
+	for i, tc := range testCases {
+		tc := tc
+
+		err := tc.malleateMisbehaviour(tc.misbehaviour)
+		suite.Require().NoError(err)
+
+		if tc.expPass {
+			suite.Require().NoError(tc.misbehaviour.ValidateBasic(), "valid test case %d failed: %s", i, tc.name)
+		} else {
+			suite.Require().Error(tc.misbehaviour.ValidateBasic(), "invalid test case %d passed: %s", i, tc.name)
+		}
+	}
+}
diff --git a/modules/light-clients/01-dymint/types/proposal_handle.go b/modules/light-clients/01-dymint/types/proposal_handle.go
new file mode 100644
index 00000000000..8c7908f577d
--- /dev/null
+++ b/modules/light-clients/01-dymint/types/proposal_handle.go
@@ -0,0 +1,97 @@
+package types
+
+import (
+	"reflect"
+	"time"
+
+	"github.com/cosmos/cosmos-sdk/codec"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+
+	clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types"
+	"github.com/cosmos/ibc-go/v5/modules/core/exported"
+)
+
+// CheckSubstituteAndUpdateState will try to update the client with the state of the
+// substitute.
+//
+// AllowUpdateAfterMisbehaviour and AllowUpdateAfterExpiry have been deprecated.
+// Please see ADR 026 for more information.
+//
+// The following must always be true:
+// - The substitute client is the same type as the subject client
+// - The subject and substitute client states match in all parameters (except frozen height, latest height, trusting period, and chain-id)
+//
+// If the subject client is frozen, it will be unfrozen before updating by resetting
+// its FrozenHeight to the zero Height.
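+//
+// For illustration only, the usual entry point for this recovery path is a client update
+// governance proposal (the client identifiers below are hypothetical):
+//
+//	proposal := clienttypes.NewClientUpdateProposal(title, description, "01-dymint-0", "01-dymint-1")
+//
+// where the second identifier names the substitute client whose latest consensus state and
+// metadata are copied into the subject client by this function.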
+func (cs ClientState) CheckSubstituteAndUpdateState(
+	ctx sdk.Context, cdc codec.BinaryCodec, subjectClientStore,
+	substituteClientStore sdk.KVStore, substituteClient exported.ClientState,
+) (exported.ClientState, error) {
+	substituteClientState, ok := substituteClient.(*ClientState)
+	if !ok {
+		return nil, sdkerrors.Wrapf(
+			clienttypes.ErrInvalidClient, "expected type %T, got %T", &ClientState{}, substituteClient,
+		)
+	}
+
+	if !IsMatchingClientState(cs, *substituteClientState) {
+		return nil, sdkerrors.Wrap(clienttypes.ErrInvalidSubstitute, "subject client state does not match substitute client state")
+	}
+
+	if cs.Status(ctx, subjectClientStore, cdc) == exported.Frozen {
+		// unfreeze the client
+		cs.FrozenHeight = clienttypes.ZeroHeight()
+	}
+
+	// copy the substitute client's latest consensus state and its processed time/height
+	// metadata to the subject client store
+	height := substituteClientState.GetLatestHeight()
+
+	consensusState, err := GetConsensusState(substituteClientStore, cdc, height)
+	if err != nil {
+		return nil, sdkerrors.Wrap(err, "unable to retrieve latest consensus state for substitute client")
+	}
+
+	SetConsensusState(subjectClientStore, cdc, consensusState, height)
+
+	// set metadata stored for the substitute consensus state
+	processedHeight, found := GetProcessedHeight(substituteClientStore, height)
+	if !found {
+		return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "unable to retrieve processed height for substitute client latest height")
+	}
+
+	processedTime, found := GetProcessedTime(substituteClientStore, height)
+	if !found {
+		return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "unable to retrieve processed time for substitute client latest height")
+	}
+
+	setConsensusMetadataWithValues(subjectClientStore, height, processedHeight, processedTime)
+
+	cs.LatestHeight = substituteClientState.LatestHeight
+	cs.ChainId = substituteClientState.ChainId
+
+	// set new trusting period based on the substitute client state
+	cs.TrustingPeriod = substituteClientState.TrustingPeriod
+
+	// no validation is necessary since the substitute is verified to be Active
+	// in 02-client.
+
+	return &cs, nil
+}
+
+// IsMatchingClientState returns true if all the client state parameters match
+// except for frozen height, latest height, trusting period, and chain-id.
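+// For example, a substitute that differs from an expired subject only in chain-id and
+// trusting period still matches, which is the recovery scenario exercised in
+// proposal_handle_test.go.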
+func IsMatchingClientState(subject, substitute ClientState) bool { + // zero out parameters which do not need to match + subject.LatestHeight = clienttypes.ZeroHeight() + subject.FrozenHeight = clienttypes.ZeroHeight() + subject.TrustingPeriod = time.Duration(0) + substitute.LatestHeight = clienttypes.ZeroHeight() + substitute.FrozenHeight = clienttypes.ZeroHeight() + substitute.TrustingPeriod = time.Duration(0) + subject.ChainId = "" + substitute.ChainId = "" + + return reflect.DeepEqual(subject, substitute) +} diff --git a/modules/light-clients/01-dymint/types/proposal_handle_test.go b/modules/light-clients/01-dymint/types/proposal_handle_test.go new file mode 100644 index 00000000000..c237b6337a5 --- /dev/null +++ b/modules/light-clients/01-dymint/types/proposal_handle_test.go @@ -0,0 +1,358 @@ +package types_test + +import ( + fmt "fmt" + "time" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" + "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" + tmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/ibc-go/v5/testing" +) + +var frozenHeight = clienttypes.NewHeight(0, 1) + +func (suite *DymintTestSuite) TestCheckSubstituteUpdateStateBasic() { + var ( + substituteClientState exported.ClientState + substitutePath *ibctesting.Path + ) + testCases := []struct { + name string + malleate func() + }{ + { + "solo machine used for substitute", func() { + substituteClientState = ibctesting.NewSolomachine(suite.T(), suite.cdc, "solo machine", "", 1).ClientState() + }, + }, + { + "non-matching substitute", func() { + suite.coordinator.SetupClients(substitutePath) + // substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID) + // switch substituteClientState.ClientType() { + // case exported.Dymint: + // dmClientState, ok := substituteClientState.(*types.ClientState) + // suite.Require().True(ok) + // // change trusting period so that test should fail + // dmClientState.TrustingPeriod = time.Hour * 24 * 7 + // dmClientState.ChainId = dmClientState.ChainId + "different chain" + // case exported.Tendermint: + // tmClientState, ok := substituteClientState.(*tmtypes.ClientState) + // suite.Require().True(ok) + // // change trusting period so that test should fail + // tmClientState.TrustingPeriod = time.Hour * 24 * 7 + // tmClientState.ChainId = tmClientState.ChainId + "different chain" + // default: + // panic(fmt.Sprintf("client type %s is not supported", substituteClientState.ClientType())) + // } + }, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB) + substitutePath = ibctesting.NewPath(suite.chainA, suite.chainB) + + suite.coordinator.SetupClients(subjectPath) + subjectClientState := suite.chainA.GetClientState(subjectPath.EndpointA.ClientID) + switch subjectClientState.ClientType() { + case exported.Dymint: + subjectDMClientState := subjectClientState.(*types.ClientState) + // expire subject client + suite.coordinator.IncrementTimeBy(subjectDMClientState.TrustingPeriod) + case exported.Tendermint: + subjectTMClientState := subjectClientState.(*tmtypes.ClientState) + // expire subject client + suite.coordinator.IncrementTimeBy(subjectTMClientState.TrustingPeriod) + default: + panic(fmt.Sprintf("client type %s is not supported", subjectClientState.ClientType())) + } + + 
suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + + tc.malleate() + + subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID) + substituteClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID) + + updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState) + suite.Require().Error(err) + suite.Require().Nil(updatedClient) + }) + } +} + +// to expire clients, time needs to be fast forwarded on both chainA and chainB. +// this is to prevent headers from failing when attempting to update later. +func (suite *DymintTestSuite) TestCheckSubstituteAndUpdateState() { + testCases := []struct { + name string + FreezeClient bool + ExpireClient bool + expPass bool + }{ + { + name: "PASS: update checks are deprecated, client is frozen and expired", + FreezeClient: true, + ExpireClient: true, + expPass: true, + }, + { + name: "PASS: update checks are deprecated, not frozen or expired", + FreezeClient: false, + ExpireClient: false, + expPass: true, + }, + { + name: "PASS: update checks are deprecated, not frozen or expired", + FreezeClient: false, + ExpireClient: false, + expPass: true, + }, + { + name: "PASS: update checks are deprecated, client is frozen", + FreezeClient: true, + ExpireClient: false, + expPass: true, + }, + { + name: "PASS: update checks are deprecated, client is expired", + FreezeClient: false, + ExpireClient: true, + expPass: true, + }, + } + + for _, tc := range testCases { + tc := tc + + // for each test case a header used for unexpiring clients and unfreezing + // a client are each tested to ensure that unexpiry headers cannot update + // a client when a unfreezing header is required. + suite.Run(tc.name, func() { + // start by testing unexpiring the client + suite.SetupTest() // reset + + // construct subject using test case parameters + subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.SetupClients(subjectPath) + + subjectClientState := suite.chainA.GetClientState(subjectPath.EndpointA.ClientID) + switch subjectClientState.ClientType() { + case exported.Dymint: + subjectDMClientState := subjectClientState.(*types.ClientState) + + // apply freezing or expiry as determined by the test case + if tc.FreezeClient { + subjectDMClientState.FrozenHeight = frozenHeight + } + if tc.ExpireClient { + // expire subject client + suite.coordinator.IncrementTimeBy(subjectDMClientState.TrustingPeriod) + suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + } + case exported.Tendermint: + subjectTMClientState := subjectClientState.(*tmtypes.ClientState) + + // apply freezing or expiry as determined by the test case + if tc.FreezeClient { + subjectTMClientState.FrozenHeight = frozenHeight + } + if tc.ExpireClient { + // expire subject client + suite.coordinator.IncrementTimeBy(subjectTMClientState.TrustingPeriod) + suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + } + + default: + panic(fmt.Sprintf("client type %s is not supported", subjectClientState.ClientType())) + } + + // construct the substitute to match the subject client + // NOTE: the substitute is explicitly created after the freezing or expiry occurs, + // primarily to prevent the substitute from becoming frozen. It also should be + // the natural flow of events in practice. 
The subject will become frozen/expired + // and a substitute will be created along with a governance proposal as a response + + substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.SetupClients(substitutePath) + + substituteClientState := suite.chainA.GetClientState(substitutePath.EndpointA.ClientID) + switch substituteClientState.ClientType() { + case exported.Dymint: + substituteDMClientState := substituteClientState.(*types.ClientState) + // update trusting period of substitute client state + substituteDMClientState.TrustingPeriod = time.Hour * 24 * 7 + case exported.Tendermint: + substituteTMClientState := substituteClientState.(*tmtypes.ClientState) + // update trusting period of substitute client state + substituteTMClientState.TrustingPeriod = time.Hour * 24 * 7 + default: + panic(fmt.Sprintf("client type %s is not supported", subjectClientState.ClientType())) + } + + suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID, substituteClientState) + + // update substitute a few times + for i := 0; i < 3; i++ { + err := substitutePath.EndpointA.UpdateClient() + suite.Require().NoError(err) + // skip a block + suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + } + + // test that subject gets updated chain-id + newChainID := "new-chain-id" + // get updated substitute + substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID) + switch substituteClientState.ClientType() { + case exported.Dymint: + substituteDMClientState := substituteClientState.(*types.ClientState) + substituteDMClientState.ChainId = newChainID + case exported.Tendermint: + substituteTMClientState := substituteClientState.(*tmtypes.ClientState) + substituteTMClientState.ChainId = newChainID + default: + panic(fmt.Sprintf("client type %s is not supported", subjectClientState.ClientType())) + } + + subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID) + substituteClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID) + + expectedConsState := substitutePath.EndpointA.GetConsensusState(substituteClientState.GetLatestHeight()) + expectedProcessedTime, found := types.GetProcessedTime(substituteClientStore, substituteClientState.GetLatestHeight()) + suite.Require().True(found) + expectedProcessedHeight, found := types.GetProcessedTime(substituteClientStore, substituteClientState.GetLatestHeight()) + suite.Require().True(found) + expectedIterationKey := types.GetIterationKey(substituteClientStore, substituteClientState.GetLatestHeight()) + + updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState) + + if tc.expPass { + suite.Require().NoError(err) + + updatedClientChainId := newChainID + FrozenHeight := clienttypes.ZeroHeight() + TrustingPeriod := time.Hour * 24 * 7 + switch updatedClient.ClientType() { + case exported.Dymint: + updatedClientChainId = updatedClient.(*types.ClientState).ChainId + FrozenHeight = updatedClient.(*types.ClientState).FrozenHeight + TrustingPeriod = updatedClient.(*types.ClientState).TrustingPeriod + case exported.Tendermint: + updatedClientChainId = updatedClient.(*tmtypes.ClientState).ChainId + FrozenHeight = updatedClient.(*tmtypes.ClientState).FrozenHeight + 
TrustingPeriod = updatedClient.(*tmtypes.ClientState).TrustingPeriod + default: + panic(fmt.Sprintf("client type %s is not supported", subjectClientState.ClientType())) + } + suite.Require().Equal(clienttypes.ZeroHeight(), FrozenHeight) + + subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID) + + // check that the correct consensus state was copied over + suite.Require().Equal(substituteClientState.GetLatestHeight(), updatedClient.GetLatestHeight()) + subjectConsState := subjectPath.EndpointA.GetConsensusState(updatedClient.GetLatestHeight()) + subjectProcessedTime, found := types.GetProcessedTime(subjectClientStore, updatedClient.GetLatestHeight()) + suite.Require().True(found) + subjectProcessedHeight, found := types.GetProcessedTime(substituteClientStore, updatedClient.GetLatestHeight()) + suite.Require().True(found) + subjectIterationKey := types.GetIterationKey(substituteClientStore, updatedClient.GetLatestHeight()) + + suite.Require().Equal(expectedConsState, subjectConsState) + suite.Require().Equal(expectedProcessedTime, subjectProcessedTime) + suite.Require().Equal(expectedProcessedHeight, subjectProcessedHeight) + suite.Require().Equal(expectedIterationKey, subjectIterationKey) + + suite.Require().Equal(newChainID, updatedClientChainId) + suite.Require().Equal(time.Hour*24*7, TrustingPeriod) + } else { + suite.Require().Error(err) + suite.Require().Nil(updatedClient) + } + }) + } +} + +func (suite *DymintTestSuite) TestIsMatchingClientState() { + var ( + subjectPath, substitutePath *ibctesting.Path + subjectClientState, substituteClientState *types.ClientState + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "matching clients", func() { + switch suite.chainA.TestChainClient.GetSelfClientType() { + case exported.Dymint: + // ChainBs' counterparty client is Dymint + subjectClientState = suite.chainB.GetClientState(subjectPath.EndpointB.ClientID).(*types.ClientState) + substituteClientState = suite.chainB.GetClientState(substitutePath.EndpointB.ClientID).(*types.ClientState) + case exported.Tendermint: + // ChainAs' counterparty client is Dymint + subjectClientState = suite.chainA.GetClientState(subjectPath.EndpointA.ClientID).(*types.ClientState) + substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*types.ClientState) + default: + panic(fmt.Sprintf("client type %s is not supported", subjectClientState.ClientType())) + } + }, true, + }, + { + "matching, frozen height is not used in check for equality", func() { + subjectClientState.FrozenHeight = frozenHeight + substituteClientState.FrozenHeight = clienttypes.ZeroHeight() + }, true, + }, + { + "matching, latest height is not used in check for equality", func() { + subjectClientState.LatestHeight = clienttypes.NewHeight(0, 10) + substituteClientState.FrozenHeight = clienttypes.ZeroHeight() + }, true, + }, + { + "matching, chain id is different", func() { + subjectClientState.ChainId = "bitcoin" + substituteClientState.ChainId = "ethereum" + }, true, + }, + { + "matching, trusting period is different", func() { + subjectClientState.TrustingPeriod = time.Duration(time.Hour * 10) + substituteClientState.TrustingPeriod = time.Duration(time.Hour * 1) + }, true, + }, + { + "not matching, trust level is different", func() { + subjectClientState.TrustLevel = types.Fraction{2, 3} + substituteClientState.TrustLevel = types.Fraction{1, 3} + }, false, + }, + } + + for _, tc := range 
testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + subjectPath = ibctesting.NewPath(suite.chainA, suite.chainB) + substitutePath = ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.SetupClients(subjectPath) + suite.coordinator.SetupClients(substitutePath) + + tc.malleate() + + suite.Require().Equal(tc.expPass, types.IsMatchingClientState(*subjectClientState, *substituteClientState)) + }) + } +} diff --git a/modules/light-clients/01-dymint/types/self_client.go b/modules/light-clients/01-dymint/types/self_client.go new file mode 100644 index 00000000000..85aff8f3b7c --- /dev/null +++ b/modules/light-clients/01-dymint/types/self_client.go @@ -0,0 +1,99 @@ +package types + +import ( + "reflect" + "time" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" +) + +var _ exported.SelfClient = (*SelfClient)(nil) + +type SelfClient struct{} + +// NewClientState creates a new ClientState instance +func NewSelfClient() exported.SelfClient { + return &SelfClient{} +} + +// ValidateSelfClientState validates the client parameters for a client of the running chain +// This function is only used to validate the client state the counterparty stores for this chain +// Client must be in same revision as the executing chain +// dymint doesn't care about the unbonding period, so ignore it +func (sc SelfClient) ValidateSelfClientState( + ctx sdk.Context, + expectedUbdPeriod time.Duration, + clientState exported.ClientState, +) error { + tmClient, ok := clientState.(*ClientState) + if !ok { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "client must be a Dymint client, expected: %T, got: %T", + &ClientState{}, tmClient) + } + + if !tmClient.FrozenHeight.IsZero() { + return clienttypes.ErrClientFrozen + } + + if ctx.ChainID() != tmClient.ChainId { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "invalid chain-id. expected: %s, got: %s", + ctx.ChainID(), tmClient.ChainId) + } + + revision := clienttypes.ParseChainID(ctx.ChainID()) + + // client must be in the same revision as executing chain + if tmClient.LatestHeight.RevisionNumber != revision { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "client is not in the same revision as the chain. expected revision: %d, got: %d", + tmClient.LatestHeight.RevisionNumber, revision) + } + + selfHeight := clienttypes.NewHeight(revision, uint64(ctx.BlockHeight())) + if tmClient.LatestHeight.GTE(selfHeight) { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "client has LatestHeight %d greater than or equal to chain height %d", + tmClient.LatestHeight, selfHeight) + } + + expectedProofSpecs := commitmenttypes.GetSDKSpecs() + if !reflect.DeepEqual(expectedProofSpecs, tmClient.ProofSpecs) { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "client has invalid proof specs. 
expected: %v got: %v", + expectedProofSpecs, tmClient.ProofSpecs) + } + + if len(tmClient.UpgradePath) != 0 { + // For now, SDK IBC implementation assumes that upgrade path (if defined) is defined by SDK upgrade module + expectedUpgradePath := []string{upgradetypes.StoreKey, upgradetypes.KeyUpgradedIBCState} + if !reflect.DeepEqual(expectedUpgradePath, tmClient.UpgradePath) { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "upgrade path must be the upgrade path defined by upgrade module. expected %v, got %v", + expectedUpgradePath, tmClient.UpgradePath) + } + } + return nil +} + +func (sc SelfClient) GetSelfConsensusStateFromBlocHeader( + cdc codec.BinaryCodec, + blockHeader []byte, +) (exported.ConsensusState, error) { + // unmarshal block header + tmBlockHeader := &tmproto.Header{} + if err := cdc.Unmarshal(blockHeader, tmBlockHeader); err != nil { + return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidHeader, "could not unmarshal block header: %v", err) + } + return NewConsensusState(tmBlockHeader.Time, + commitmenttypes.NewMerkleRoot(tmBlockHeader.GetAppHash()), + tmBlockHeader.NextValidatorsHash), nil +} + +func (sc SelfClient) ClientType() string { + return exported.Dymint +} diff --git a/modules/light-clients/01-dymint/types/store.go b/modules/light-clients/01-dymint/types/store.go new file mode 100644 index 00000000000..b9d5a680199 --- /dev/null +++ b/modules/light-clients/01-dymint/types/store.go @@ -0,0 +1,370 @@ +package types + +import ( + "bytes" + "encoding/binary" + "strings" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + host "github.com/cosmos/ibc-go/v5/modules/core/24-host" + "github.com/cosmos/ibc-go/v5/modules/core/exported" +) + +/* +This file contains the logic for storage and iteration over `IterationKey` metadata that is stored +for each consensus state. The consensus state key specified in ICS-24 and expected by counterparty chains +stores the consensus state under the key: `consensusStates/{revision_number}-{revision_height}`, with each number +represented as a string. +While this works fine for IBC proof verification, it makes efficient iteration difficult since the lexicographic order +of the consensus state keys do not match the height order of consensus states. This makes consensus state pruning and +monotonic time enforcement difficult since it is inefficient to find the earliest consensus state or to find the neigboring +consensus states given a consensus state height. +Changing the ICS-24 representation will be a major breaking change that requires counterparty chains to accept a new key format. +Thus to avoid breaking IBC, we can store a lookup from a more efficiently formatted key: `iterationKey` to the consensus state key which +stores the underlying consensus state. This efficient iteration key will be formatted like so: `iterateConsensusStates{BigEndianRevisionBytes}{BigEndianHeightBytes}`. +This ensures that the lexicographic order of iteration keys match the height order of the consensus states. Thus, we can use the SDK store's +Iterators to iterate over the consensus states in ascending/descending order by providing a mapping from `iterationKey -> consensusStateKey -> ConsensusState`. +A future version of IBC may choose to replace the ICS24 ConsensusState path with the more efficient format and make this indirection unnecessary. 
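+
+For example (illustrative values only): a consensus state at revision 1, height 300 is stored under the
+ICS-24 key `consensusStates/1-300`, while its iteration key is the `iterateConsensusStates` prefix
+followed by the 8-byte big-endian encoding of 1 and then the 8-byte big-endian encoding of 300, so a
+plain lexicographic iteration over the iteration keys visits consensus states in increasing height order.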
+*/ + +const KeyIterateConsensusStatePrefix = "iterateConsensusStates" + +var ( + // KeyProcessedTime is appended to consensus state key to store the processed time + KeyProcessedTime = []byte("/processedTime") + // KeyProcessedHeight is appended to consensus state key to store the processed height + KeyProcessedHeight = []byte("/processedHeight") + // KeyIteration stores the key mapping to consensus state key for efficient iteration + KeyIteration = []byte("/iterationKey") +) + +// SetConsensusState stores the consensus state at the given height. +func SetConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, consensusState *ConsensusState, height exported.Height) { + key := host.ConsensusStateKey(height) + val := clienttypes.MustMarshalConsensusState(cdc, consensusState) + clientStore.Set(key, val) +} + +// GetConsensusState retrieves the consensus state from the client prefixed +// store. An error is returned if the consensus state does not exist. +func GetConsensusState(store sdk.KVStore, cdc codec.BinaryCodec, height exported.Height) (*ConsensusState, error) { + bz := store.Get(host.ConsensusStateKey(height)) + if bz == nil { + return nil, sdkerrors.Wrapf( + clienttypes.ErrConsensusStateNotFound, + "consensus state does not exist for height %s", height, + ) + } + + consensusStateI, err := clienttypes.UnmarshalConsensusState(cdc, bz) + if err != nil { + return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "unmarshal error: %v", err) + } + + consensusState, ok := consensusStateI.(*ConsensusState) + if !ok { + return nil, sdkerrors.Wrapf( + clienttypes.ErrInvalidConsensus, + "invalid consensus type %T, expected %T", consensusState, &ConsensusState{}, + ) + } + + return consensusState, nil +} + +// deleteConsensusState deletes the consensus state at the given height +func deleteConsensusState(clientStore sdk.KVStore, height exported.Height) { + key := host.ConsensusStateKey(height) + clientStore.Delete(key) +} + +// IterateConsensusMetadata iterates through the prefix store and applies the callback. +// If the cb returns true, then iterator will close and stop. +func IterateConsensusMetadata(store sdk.KVStore, cb func(key, val []byte) bool) { + iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyConsensusStatePrefix)) + + // iterate over processed time and processed height + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + keySplit := strings.Split(string(iterator.Key()), "/") + // processed time key in prefix store has format: "consensusState//processedTime" + if len(keySplit) != 3 { + // ignore all consensus state keys + continue + } + + if keySplit[2] != "processedTime" && keySplit[2] != "processedHeight" { + // only perform callback on consensus metadata + continue + } + + if cb(iterator.Key(), iterator.Value()) { + break + } + } + + // iterate over iteration keys + iterator = sdk.KVStorePrefixIterator(store, []byte(KeyIterateConsensusStatePrefix)) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + if cb(iterator.Key(), iterator.Value()) { + break + } + } +} + +// ProcessedTimeKey returns the key under which the processed time will be stored in the client store. +func ProcessedTimeKey(height exported.Height) []byte { + return append(host.ConsensusStateKey(height), KeyProcessedTime...) +} + +// SetProcessedTime stores the time at which a header was processed and the corresponding consensus state was created. 
+// This is useful when validating whether a packet has reached the time specified delay period in the dymint client's +// verification functions +func SetProcessedTime(clientStore sdk.KVStore, height exported.Height, timeNs uint64) { + key := ProcessedTimeKey(height) + val := sdk.Uint64ToBigEndian(timeNs) + clientStore.Set(key, val) +} + +// GetProcessedTime gets the time (in nanoseconds) at which this chain received and processed a dymint header. +// This is used to validate that a received packet has passed the time delay period. +func GetProcessedTime(clientStore sdk.KVStore, height exported.Height) (uint64, bool) { + key := ProcessedTimeKey(height) + bz := clientStore.Get(key) + if bz == nil { + return 0, false + } + return sdk.BigEndianToUint64(bz), true +} + +// deleteProcessedTime deletes the processedTime for a given height +func deleteProcessedTime(clientStore sdk.KVStore, height exported.Height) { + key := ProcessedTimeKey(height) + clientStore.Delete(key) +} + +// ProcessedHeightKey returns the key under which the processed height will be stored in the client store. +func ProcessedHeightKey(height exported.Height) []byte { + return append(host.ConsensusStateKey(height), KeyProcessedHeight...) +} + +// SetProcessedHeight stores the height at which a header was processed and the corresponding consensus state was created. +// This is useful when validating whether a packet has reached the specified block delay period in the dymint client's +// verification functions +func SetProcessedHeight(clientStore sdk.KVStore, consHeight, processedHeight exported.Height) { + key := ProcessedHeightKey(consHeight) + val := []byte(processedHeight.String()) + clientStore.Set(key, val) +} + +// GetProcessedHeight gets the height at which this chain received and processed a dymint header. +// This is used to validate that a received packet has passed the block delay period. +func GetProcessedHeight(clientStore sdk.KVStore, height exported.Height) (exported.Height, bool) { + key := ProcessedHeightKey(height) + bz := clientStore.Get(key) + if bz == nil { + return nil, false + } + processedHeight, err := clienttypes.ParseHeight(string(bz)) + if err != nil { + return nil, false + } + return processedHeight, true +} + +// deleteProcessedHeight deletes the processedHeight for a given height +func deleteProcessedHeight(clientStore sdk.KVStore, height exported.Height) { + key := ProcessedHeightKey(height) + clientStore.Delete(key) +} + +// IterationKey returns the key under which the consensus state key will be stored. +// The iteration key is a BigEndian representation of the consensus state key to support efficient iteration. +func IterationKey(height exported.Height) []byte { + heightBytes := bigEndianHeightBytes(height) + return append([]byte(KeyIterateConsensusStatePrefix), heightBytes...) +} + +// SetIterationKey stores the consensus state key under a key that is more efficient for ordered iteration +func SetIterationKey(clientStore sdk.KVStore, height exported.Height) { + key := IterationKey(height) + val := host.ConsensusStateKey(height) + clientStore.Set(key, val) +} + +// GetIterationKey returns the consensus state key stored under the efficient iteration key. 
+// NOTE: This function is currently only used for testing purposes +func GetIterationKey(clientStore sdk.KVStore, height exported.Height) []byte { + key := IterationKey(height) + return clientStore.Get(key) +} + +// deleteIterationKey deletes the iteration key for a given height +func deleteIterationKey(clientStore sdk.KVStore, height exported.Height) { + key := IterationKey(height) + clientStore.Delete(key) +} + +// GetHeightFromIterationKey takes an iteration key and returns the height that it references +func GetHeightFromIterationKey(iterKey []byte) exported.Height { + bigEndianBytes := iterKey[len([]byte(KeyIterateConsensusStatePrefix)):] + revisionBytes := bigEndianBytes[0:8] + heightBytes := bigEndianBytes[8:] + revision := binary.BigEndian.Uint64(revisionBytes) + height := binary.BigEndian.Uint64(heightBytes) + return clienttypes.NewHeight(revision, height) +} + +// IterateConsensusStateAscending iterates through the consensus states in ascending order. It calls the provided +// callback on each height, until stop=true is returned. +func IterateConsensusStateAscending(clientStore sdk.KVStore, cb func(height exported.Height) (stop bool)) error { + iterator := sdk.KVStorePrefixIterator(clientStore, []byte(KeyIterateConsensusStatePrefix)) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + iterKey := iterator.Key() + height := GetHeightFromIterationKey(iterKey) + if cb(height) { + return nil + } + } + return nil +} + +// GetNextConsensusState returns the lowest consensus state that is larger than the given height. +// The Iterator returns a storetypes.Iterator which iterates from start (inclusive) to end (exclusive). +// If the starting height exists in store, we need to call iterator.Next() to get the next consenus state. +// Otherwise, the iterator is already at the next consensus state so we can call iterator.Value() immediately. +func GetNextConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, height exported.Height) (*ConsensusState, bool) { + iterateStore := prefix.NewStore(clientStore, []byte(KeyIterateConsensusStatePrefix)) + iterator := iterateStore.Iterator(bigEndianHeightBytes(height), nil) + defer iterator.Close() + if !iterator.Valid() { + return nil, false + } + + // if iterator is at current height, ignore the consensus state at current height and get next height + // if iterator value is not at current height, it is already at next height. + if bytes.Equal(iterator.Value(), host.ConsensusStateKey(height)) { + iterator.Next() + if !iterator.Valid() { + return nil, false + } + } + + csKey := iterator.Value() + + return getTmConsensusState(clientStore, cdc, csKey) +} + +// GetPreviousConsensusState returns the highest consensus state that is lower than the given height. +// The Iterator returns a storetypes.Iterator which iterates from the end (exclusive) to start (inclusive). +// Thus to get previous consensus state we call iterator.Value() immediately. +func GetPreviousConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, height exported.Height) (*ConsensusState, bool) { + iterateStore := prefix.NewStore(clientStore, []byte(KeyIterateConsensusStatePrefix)) + iterator := iterateStore.ReverseIterator(nil, bigEndianHeightBytes(height)) + defer iterator.Close() + + if !iterator.Valid() { + return nil, false + } + + csKey := iterator.Value() + + return getTmConsensusState(clientStore, cdc, csKey) +} + +// PruneAllExpiredConsensusStates iterates over all consensus states for a given +// client store. 
If a consensus state is expired, it is deleted and its metadata +// is deleted. +func PruneAllExpiredConsensusStates( + ctx sdk.Context, clientStore sdk.KVStore, + cdc codec.BinaryCodec, clientState *ClientState, +) (err error) { + var heights []exported.Height + + pruneCb := func(height exported.Height) bool { + consState, err := GetConsensusState(clientStore, cdc, height) + // this error should never occur + if err != nil { + return true + } + + if clientState.IsExpired(consState.Timestamp, ctx.BlockTime()) { + heights = append(heights, height) + } + + return false + } + + err = IterateConsensusStateAscending(clientStore, pruneCb) + if err != nil { + return err + } + + for _, height := range heights { + deleteConsensusState(clientStore, height) + deleteConsensusMetadata(clientStore, height) + } + + return nil +} + +// Helper function for GetNextConsensusState and GetPreviousConsensusState +func getTmConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, key []byte) (*ConsensusState, bool) { + bz := clientStore.Get(key) + if bz == nil { + return nil, false + } + + consensusStateI, err := clienttypes.UnmarshalConsensusState(cdc, bz) + if err != nil { + return nil, false + } + + consensusState, ok := consensusStateI.(*ConsensusState) + if !ok { + return nil, false + } + return consensusState, true +} + +func bigEndianHeightBytes(height exported.Height) []byte { + heightBytes := make([]byte, 16) + binary.BigEndian.PutUint64(heightBytes, height.GetRevisionNumber()) + binary.BigEndian.PutUint64(heightBytes[8:], height.GetRevisionHeight()) + return heightBytes +} + +// setConsensusMetadata sets context time as processed time and set context height as processed height +// as this is internal dymint light client logic. +// client state and consensus state will be set by client keeper +// set iteration key to provide ability for efficient ordered iteration of consensus states. +func setConsensusMetadata(ctx sdk.Context, clientStore sdk.KVStore, height exported.Height) { + setConsensusMetadataWithValues(clientStore, height, clienttypes.GetSelfHeight(ctx), uint64(ctx.BlockTime().UnixNano())) +} + +// setConsensusMetadataWithValues sets the consensus metadata with the provided values +func setConsensusMetadataWithValues( + clientStore sdk.KVStore, height, + processedHeight exported.Height, + processedTime uint64, +) { + SetProcessedTime(clientStore, height, processedTime) + SetProcessedHeight(clientStore, height, processedHeight) + SetIterationKey(clientStore, height) +} + +// deleteConsensusMetadata deletes the metadata stored for a particular consensus state. 
+func deleteConsensusMetadata(clientStore sdk.KVStore, height exported.Height) { + deleteProcessedTime(clientStore, height) + deleteProcessedHeight(clientStore, height) + deleteIterationKey(clientStore, height) +} diff --git a/modules/light-clients/01-dymint/types/store_test.go b/modules/light-clients/01-dymint/types/store_test.go new file mode 100644 index 00000000000..b00e485470e --- /dev/null +++ b/modules/light-clients/01-dymint/types/store_test.go @@ -0,0 +1,248 @@ +package types_test + +import ( + "math" + "time" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + host "github.com/cosmos/ibc-go/v5/modules/core/24-host" + "github.com/cosmos/ibc-go/v5/modules/core/exported" + "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" + solomachinetypes "github.com/cosmos/ibc-go/v5/modules/light-clients/06-solomachine/types" + ibctesting "github.com/cosmos/ibc-go/v5/testing" +) + +func (suite *DymintTestSuite) TestGetConsensusState() { + var ( + height exported.Height + path *ibctesting.Path + dymintCounterpartyChain *ibctesting.TestChain + endpointClientID string + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "success", func() {}, true, + }, + { + "consensus state not found", func() { + // use height with no consensus state set + height = height.(clienttypes.Height).Increment() + }, false, + }, + { + "not a consensus state interface", func() { + // marshal an empty client state and set as consensus state + store := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), endpointClientID) + clientStateBz := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.MustMarshalClientState(&types.ClientState{}) + store.Set(host.ConsensusStateKey(height), clientStateBz) + }, false, + }, + { + "invalid consensus state (solomachine)", func() { + // marshal and set solomachine consensus state + store := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), endpointClientID) + consensusStateBz := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.MustMarshalConsensusState(&solomachinetypes.ConsensusState{}) + store.Set(host.ConsensusStateKey(height), consensusStateBz) + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + path = ibctesting.NewPath(suite.chainA, suite.chainB) + + suite.coordinator.Setup(path) + + if suite.chainA.TestChainClient.GetSelfClientType() == exported.Dymint { + dymintCounterpartyChain = suite.chainB + endpointClientID = path.EndpointB.ClientID + } else { + // chainB must be Dymint + dymintCounterpartyChain = suite.chainA + endpointClientID = path.EndpointA.ClientID + } + + clientState := dymintCounterpartyChain.GetClientState(endpointClientID) + height = clientState.GetLatestHeight() + + tc.malleate() // change vars as necessary + + store := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), endpointClientID) + consensusState, err := types.GetConsensusState(store, dymintCounterpartyChain.Codec, height) + + if tc.expPass { + suite.Require().NoError(err) + expConsensusState, found := dymintCounterpartyChain.GetConsensusState(endpointClientID, height) + suite.Require().True(found) + suite.Require().Equal(expConsensusState, consensusState) + } else { + suite.Require().Error(err) + 
suite.Require().Nil(consensusState) + } + }) + } +} + +func (suite *DymintTestSuite) TestGetProcessedTime() { + var ( + dymintCounterpartyChain *ibctesting.TestChain + endpoint *ibctesting.Endpoint + expectedTime time.Time + ) + // setup + path := ibctesting.NewPath(suite.chainA, suite.chainB) + + suite.coordinator.UpdateTime() + + if suite.chainB.TestChainClient.GetSelfClientType() == exported.Tendermint { + // chainA must be Dymint + dymintCounterpartyChain = suite.chainB + endpoint = path.EndpointB + // coordinator increments time before creating client + expectedTime = dymintCounterpartyChain.TestChainClient.(*ibctesting.TestChainTendermint).CurrentHeader.Time.Add(ibctesting.TimeIncrement) + } else { + // chainB must be Dymint + dymintCounterpartyChain = suite.chainA + endpoint = path.EndpointA + if dymintCounterpartyChain.TestChainClient.GetSelfClientType() == exported.Tendermint { + expectedTime = dymintCounterpartyChain.TestChainClient.(*ibctesting.TestChainTendermint).CurrentHeader.Time.Add(ibctesting.TimeIncrement) + } else { + expectedTime = dymintCounterpartyChain.TestChainClient.(*ibctesting.TestChainDymint).CurrentHeader.Time.Add(ibctesting.TimeIncrement) + } + } + + // Verify ProcessedTime on CreateClient + err := endpoint.CreateClient() + suite.Require().NoError(err) + + clientState := dymintCounterpartyChain.GetClientState(endpoint.ClientID) + height := clientState.GetLatestHeight() + + store := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + actualTime, ok := types.GetProcessedTime(store, height) + suite.Require().True(ok, "could not retrieve processed time for stored consensus state") + suite.Require().Equal(uint64(expectedTime.UnixNano()), actualTime, "retrieved processed time is not expected value") + + suite.coordinator.UpdateTime() + // coordinator increments time before updating client + if dymintCounterpartyChain.TestChainClient.GetSelfClientType() == exported.Tendermint { + expectedTime = dymintCounterpartyChain.TestChainClient.(*ibctesting.TestChainTendermint).CurrentHeader.Time.Add(ibctesting.TimeIncrement) + } else { + expectedTime = dymintCounterpartyChain.TestChainClient.(*ibctesting.TestChainDymint).CurrentHeader.Time.Add(ibctesting.TimeIncrement) + } + + // Verify ProcessedTime on UpdateClient + err = endpoint.UpdateClient() + suite.Require().NoError(err) + + clientState = dymintCounterpartyChain.GetClientState(endpoint.ClientID) + height = clientState.GetLatestHeight() + + store = dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + actualTime, ok = types.GetProcessedTime(store, height) + suite.Require().True(ok, "could not retrieve processed time for stored consensus state") + suite.Require().Equal(uint64(expectedTime.UnixNano()), actualTime, "retrieved processed time is not expected value") + + // try to get processed time for height that doesn't exist in store + _, ok = types.GetProcessedTime(store, clienttypes.NewHeight(1, 1)) + suite.Require().False(ok, "retrieved processed time for a non-existent consensus state") +} + +func (suite *DymintTestSuite) TestIterationKey() { + testHeights := []exported.Height{ + clienttypes.NewHeight(0, 1), + clienttypes.NewHeight(0, 1234), + clienttypes.NewHeight(7890, 4321), + clienttypes.NewHeight(math.MaxUint64, math.MaxUint64), + } + for _, h := range testHeights { + k := types.IterationKey(h) + retrievedHeight := types.GetHeightFromIterationKey(k) + 
suite.Require().Equal(h, retrievedHeight, "retrieving height from iteration key failed") + } +} + +func (suite *DymintTestSuite) TestIterateConsensusStates() { + var dymintCounterpartyChain *ibctesting.TestChain + if suite.chainB.TestChainClient.GetSelfClientType() == exported.Tendermint { + // chainA must be Dymint + dymintCounterpartyChain = suite.chainB + } else { + // chainB must be Dymint + dymintCounterpartyChain = suite.chainA + } + + nextValsHash := []byte("nextVals") + + // Set iteration keys and consensus states + types.SetIterationKey(dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), "testClient"), clienttypes.NewHeight(0, 1)) + dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(dymintCounterpartyChain.GetContext(), "testClient", clienttypes.NewHeight(0, 1), types.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash0-1")), nextValsHash)) + types.SetIterationKey(dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), "testClient"), clienttypes.NewHeight(4, 9)) + dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(dymintCounterpartyChain.GetContext(), "testClient", clienttypes.NewHeight(4, 9), types.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash4-9")), nextValsHash)) + types.SetIterationKey(dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), "testClient"), clienttypes.NewHeight(0, 10)) + dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(dymintCounterpartyChain.GetContext(), "testClient", clienttypes.NewHeight(0, 10), types.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash0-10")), nextValsHash)) + types.SetIterationKey(dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), "testClient"), clienttypes.NewHeight(0, 4)) + dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(dymintCounterpartyChain.GetContext(), "testClient", clienttypes.NewHeight(0, 4), types.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash0-4")), nextValsHash)) + types.SetIterationKey(dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), "testClient"), clienttypes.NewHeight(40, 1)) + dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(dymintCounterpartyChain.GetContext(), "testClient", clienttypes.NewHeight(40, 1), types.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash40-1")), nextValsHash)) + + var testArr []string + cb := func(height exported.Height) bool { + testArr = append(testArr, height.String()) + return false + } + + types.IterateConsensusStateAscending(dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), "testClient"), cb) + expectedArr := []string{"0-1", "0-4", "0-10", "4-9", "40-1"} + suite.Require().Equal(expectedArr, testArr) +} + +func (suite *DymintTestSuite) TestGetNeighboringConsensusStates() { + var dymintCounterpartyChain *ibctesting.TestChain + if suite.chainB.TestChainClient.GetSelfClientType() == exported.Tendermint { + // chainA must be Dymint + dymintCounterpartyChain = suite.chainB + } else { + // chainB must be Dymint + dymintCounterpartyChain = suite.chainA + } + + nextValsHash := []byte("nextVals") + cs01 := 
types.NewConsensusState(time.Now().UTC(), commitmenttypes.NewMerkleRoot([]byte("hash0-1")), nextValsHash) + cs04 := types.NewConsensusState(time.Now().UTC(), commitmenttypes.NewMerkleRoot([]byte("hash0-4")), nextValsHash) + cs49 := types.NewConsensusState(time.Now().UTC(), commitmenttypes.NewMerkleRoot([]byte("hash4-9")), nextValsHash) + height01 := clienttypes.NewHeight(0, 1) + height04 := clienttypes.NewHeight(0, 4) + height49 := clienttypes.NewHeight(4, 9) + + // Set iteration keys and consensus states + types.SetIterationKey(dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), "testClient"), height01) + dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(dymintCounterpartyChain.GetContext(), "testClient", height01, cs01) + types.SetIterationKey(dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), "testClient"), height04) + dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(dymintCounterpartyChain.GetContext(), "testClient", height04, cs04) + types.SetIterationKey(dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), "testClient"), height49) + dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(dymintCounterpartyChain.GetContext(), "testClient", height49, cs49) + + prevCs01, ok := types.GetPreviousConsensusState(dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), "testClient"), dymintCounterpartyChain.Codec, height01) + suite.Require().Nil(prevCs01, "consensus state exists before lowest consensus state") + suite.Require().False(ok) + prevCs49, ok := types.GetPreviousConsensusState(dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), "testClient"), dymintCounterpartyChain.Codec, height49) + suite.Require().Equal(cs04, prevCs49, "previous consensus state is not returned correctly") + suite.Require().True(ok) + + nextCs01, ok := types.GetNextConsensusState(dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), "testClient"), dymintCounterpartyChain.Codec, height01) + suite.Require().Equal(cs04, nextCs01, "next consensus state not returned correctly") + suite.Require().True(ok) + nextCs49, ok := types.GetNextConsensusState(dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), "testClient"), dymintCounterpartyChain.Codec, height49) + suite.Require().Nil(nextCs49, "next consensus state exists after highest consensus state") + suite.Require().False(ok) +} diff --git a/modules/light-clients/01-dymint/types/update.go b/modules/light-clients/01-dymint/types/update.go new file mode 100644 index 00000000000..cdd6072a92f --- /dev/null +++ b/modules/light-clients/01-dymint/types/update.go @@ -0,0 +1,274 @@ +package types + +import ( + fmt "fmt" + "reflect" + "time" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/tendermint/tendermint/light" + tmtypes "github.com/tendermint/tendermint/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" +) + +// CheckHeaderAndUpdateState checks if the 
provided header is valid, and if valid it will: +// create the consensus state for the header.Height +// and update the client state if the header height is greater than the latest client state height +// It returns an error if: +// - the client or header provided are not parseable to dymint types +// - the header is invalid +// - header height is less than or equal to the trusted header height +// - header revision is not equal to trusted header revision +// - header valset commit verification fails +// - header timestamp is past the trusting period in relation to the consensus state +// - header timestamp is less than or equal to the consensus state timestamp +// +// UpdateClient may be used to either create a consensus state for: +// - a future height greater than the latest client state height +// - a past height that was skipped during bisection +// If we are updating to a past height, a consensus state is created for that height to be persisted in client store +// If we are updating to a future height, the consensus state is created and the client state is updated to reflect +// the new latest height +// UpdateClient must only be used to update within a single revision, thus header revision number and trusted height's revision +// number must be the same. To update to a new revision, use a separate upgrade path +// Dymint client validity checking uses the bisection algorithm described +// in the [Tendermint spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md). +// +// Misbehaviour Detection: +// UpdateClient will detect implicit misbehaviour by enforcing certain invariants on any new update call and will return a frozen client. +// 1. Any valid update that creates a different consensus state for an already existing height is evidence of misbehaviour and will freeze client. +// 2. Any valid update that breaks time monotonicity with respect to its neighboring consensus states is evidence of misbehaviour and will freeze client. +// Misbehaviour sets frozen height to {0, 1} since it is only used as a boolean value (zero or non-zero). +// +// Pruning: +// UpdateClient will additionally retrieve the earliest consensus state for this clientID and check if it is expired. If it is, +// that consensus state will be pruned from store along with all associated metadata. This will prevent the client store from +// becoming bloated with expired consensus states that can no longer be used for updates and packet verification. +func (cs ClientState) CheckHeaderAndUpdateState( + ctx sdk.Context, cdc codec.BinaryCodec, clientStore sdk.KVStore, + header exported.Header, +) (exported.ClientState, exported.ConsensusState, error) { + tmHeader, ok := header.(*Header) + if !ok { + return nil, nil, sdkerrors.Wrapf( + clienttypes.ErrInvalidHeader, "expected type %T, got %T", &Header{}, header, + ) + } + + // Check if the Client store already has a consensus state for the header's height + // If the consensus state exists, and it matches the header then we return early + // since header has already been submitted in a previous UpdateClient. + var conflictingHeader bool + prevConsState, _ := GetConsensusState(clientStore, cdc, header.GetHeight()) + if prevConsState != nil { + // This header has already been submitted and the necessary state is already stored + // in client store, thus we can return early without further validation. 
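+		// (Resubmitting an identical header is therefore treated as a no-op rather than as misbehaviour.)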
+ if reflect.DeepEqual(prevConsState, tmHeader.ConsensusState()) { + return &cs, prevConsState, nil + } + // A consensus state already exists for this height, but it does not match the provided header. + // Thus, we must check that this header is valid, and if so we will freeze the client. + conflictingHeader = true + } + + // get consensus state from clientStore + trustedConsState, err := GetConsensusState(clientStore, cdc, tmHeader.TrustedHeight) + if err != nil { + return nil, nil, sdkerrors.Wrapf( + err, "could not get consensus state from clientstore at TrustedHeight: %s", tmHeader.TrustedHeight, + ) + } + + if err := checkValidity(&cs, trustedConsState, tmHeader, ctx.BlockTime()); err != nil { + return nil, nil, err + } + + consState := tmHeader.ConsensusState() + // Header is different from existing consensus state and also valid, so freeze the client and return + if conflictingHeader { + cs.FrozenHeight = FrozenHeight + return &cs, consState, nil + } + // Check that consensus state timestamps are monotonic + prevCons, prevOk := GetPreviousConsensusState(clientStore, cdc, header.GetHeight()) + nextCons, nextOk := GetNextConsensusState(clientStore, cdc, header.GetHeight()) + // if previous consensus state exists, check consensus state time is greater than previous consensus state time + // if previous consensus state is not before current consensus state, freeze the client and return. + if prevOk && !prevCons.Timestamp.Before(consState.Timestamp) { + cs.FrozenHeight = FrozenHeight + return &cs, consState, nil + } + // if next consensus state exists, check consensus state time is less than next consensus state time + // if next consensus state is not after current consensus state, freeze the client and return. + if nextOk && !nextCons.Timestamp.After(consState.Timestamp) { + cs.FrozenHeight = FrozenHeight + return &cs, consState, nil + } + + // Check the earliest consensus state to see if it is expired, if so then set the prune height + // so that we can delete consensus state and all associated metadata. 
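+	// Note that the callback below returns true on the first height visited, so at most one expired
+	// consensus state (the earliest one) is pruned per update; this keeps the gas cost of UpdateClient bounded.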
+ var ( + pruneHeight exported.Height + pruneError error + ) + pruneCb := func(height exported.Height) bool { + consState, err := GetConsensusState(clientStore, cdc, height) + // this error should never occur + if err != nil { + pruneError = err + return true + } + if cs.IsExpired(consState.Timestamp, ctx.BlockTime()) { + pruneHeight = height + } + return true + } + err = IterateConsensusStateAscending(clientStore, pruneCb) + if err != nil { + return nil, nil, err + } + if pruneError != nil { + return nil, nil, pruneError + } + // if pruneHeight is set, delete consensus state and metadata + if pruneHeight != nil { + deleteConsensusState(clientStore, pruneHeight) + deleteConsensusMetadata(clientStore, pruneHeight) + } + + newClientState, consensusState := update(ctx, clientStore, &cs, tmHeader) + return newClientState, consensusState, nil +} + +func _verifyNewHeaderAndVals( + trustedHeader *tmtypes.SignedHeader, + untrustedHeader *tmtypes.SignedHeader, + trustingPeriod time.Duration, + now time.Time, + maxClockDrift time.Duration, +) error { + if light.HeaderExpired(trustedHeader, trustingPeriod, now) { + return light.ErrOldHeaderExpired{At: trustedHeader.Time.Add(trustingPeriod), Now: now} + } + + if err := untrustedHeader.ValidateBasic(trustedHeader.ChainID); err != nil { + return fmt.Errorf("untrustedHeader.ValidateBasic failed: %w", err) + } + + if untrustedHeader.Height <= trustedHeader.Height { + return fmt.Errorf("expected new header height %d to be greater than one of old header %d", + untrustedHeader.Height, + trustedHeader.Height) + } + + if !untrustedHeader.Time.After(trustedHeader.Time) { + return fmt.Errorf("expected new header time %v to be after old header time %v", + untrustedHeader.Time, + trustedHeader.Time) + } + + if !untrustedHeader.Time.Before(now.Add(maxClockDrift)) { + return fmt.Errorf("new header has a time from the future %v (now: %v; max clock drift: %v)", + untrustedHeader.Time, + now, + maxClockDrift) + } + + return nil +} + +// checkValidity checks if the Dymint header is valid. 
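+// It verifies the header commit, checks that the header and trusted height share the same revision,
+// that the header height and timestamp are strictly increasing, and that the trusting period and
+// max clock drift have not been exceeded relative to the trusted consensus state.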
+// CONTRACT: consState.Height == header.TrustedHeight +func checkValidity( + clientState *ClientState, consState *ConsensusState, + header *Header, currentTimestamp time.Time, +) error { + if err := header.ValidateCommit(); err != nil { + return err + } + // UpdateClient only accepts updates with a header at the same revision + // as the trusted consensus state + if header.GetHeight().GetRevisionNumber() != header.TrustedHeight.RevisionNumber { + return sdkerrors.Wrapf( + ErrInvalidHeaderHeight, + "header height revision %d does not match trusted header revision %d", + header.GetHeight().GetRevisionNumber(), header.TrustedHeight.RevisionNumber, + ) + } + + tmSignedHeader, err := tmtypes.SignedHeaderFromProto(header.SignedHeader) + if err != nil { + return sdkerrors.Wrap(err, "signed header in not dymint signed header type") + } + + _, err = tmtypes.ValidatorSetFromProto(header.ValidatorSet) + if err != nil { + return sdkerrors.Wrap(err, "validator set in not dymint validator set type") + } + + // assert header height is newer than consensus state + if header.GetHeight().LTE(header.TrustedHeight) { + return sdkerrors.Wrapf( + clienttypes.ErrInvalidHeader, + "header height ≤ consensus state height (%s ≤ %s)", header.GetHeight(), header.TrustedHeight, + ) + } + + chainID := clientState.GetChainID() + // If chainID is in revision format, then set revision number of chainID with the revision number + // of the header we are verifying + // This is useful if the update is at a previous revision rather than an update to the latest revision + // of the client. + // The chainID must be set correctly for the previous revision before attempting verification. + // Updates for previous revisions are not supported if the chainID is not in revision format. + if clienttypes.IsRevisionFormat(chainID) { + chainID, _ = clienttypes.SetRevisionNumber(chainID, header.GetHeight().GetRevisionNumber()) + } + + // Construct a trusted header using the fields in consensus state + // Only Height and Time are necessary for verification + trustedHeader := tmtypes.Header{ + ChainID: chainID, + Height: int64(header.TrustedHeight.RevisionHeight), + Time: consState.Timestamp, + } + signedHeader := tmtypes.SignedHeader{ + Header: &trustedHeader, + } + + // Verify next header with the passed-in trustedVals + // - asserts trusting period not passed + // - assert header timestamp is not past the trusting period + // - assert header timestamp is past latest stored consensus state timestamp + err = _verifyNewHeaderAndVals( + &signedHeader, + tmSignedHeader, + clientState.TrustingPeriod, currentTimestamp, clientState.MaxClockDrift, + ) + if err != nil { + return sdkerrors.Wrap(err, "failed to verify header") + } + return nil +} + +// update the consensus state from a new header and set processed time metadata +func update(ctx sdk.Context, clientStore sdk.KVStore, clientState *ClientState, header *Header) (*ClientState, *ConsensusState) { + height := header.GetHeight().(clienttypes.Height) + if height.GT(clientState.LatestHeight) { + clientState.LatestHeight = height + } + consensusState := &ConsensusState{ + Timestamp: header.GetTime(), + Root: commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), + } + + // set metadata for this consensus state + setConsensusMetadata(ctx, clientStore, header.GetHeight()) + + return clientState, consensusState +} diff --git a/modules/light-clients/01-dymint/types/update_test.go b/modules/light-clients/01-dymint/types/update_test.go new file mode 100644 index 00000000000..4c150883859 --- 
/dev/null +++ b/modules/light-clients/01-dymint/types/update_test.go @@ -0,0 +1,449 @@ +package types_test + +import ( + "fmt" + "time" + + tmtypes "github.com/tendermint/tendermint/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" + types "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" + ibctesting "github.com/cosmos/ibc-go/v5/testing" + ibctestingmock "github.com/cosmos/ibc-go/v5/testing/mock" +) + +func (suite *DymintTestSuite) TestCheckHeaderAndUpdateState() { + var ( + clientState *types.ClientState + consensusState *types.ConsensusState + consStateHeight clienttypes.Height + newHeader *types.Header + currentTime time.Time + bothValSet *tmtypes.ValidatorSet + signers []tmtypes.PrivValidator + bothSigners []tmtypes.PrivValidator + ) + + // Setup different validators and signers for testing different types of updates + altPrivVal := ibctestingmock.NewPV() + altPubKey, err := altPrivVal.GetPubKey() + suite.Require().NoError(err) + + revisionHeight := int64(height.RevisionHeight) + + // create modified heights to use for test-cases + heightPlus1 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight+1) + heightMinus1 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-1) + heightMinus3 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-3) + heightPlus5 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight+5) + + altVal := tmtypes.NewValidator(altPubKey, revisionHeight) + + var chainADymint *ibctesting.TestChainDymint + + testCases := []struct { + name string + setup func(*DymintTestSuite) + expFrozen bool + expPass bool + }{ + { + name: "successful update with next height and same validator set", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expFrozen: false, + expPass: true, + }, + { + name: "successful update with future height and different validator set", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, bothValSet, suite.valSet, bothSigners) + currentTime = suite.now + }, + expFrozen: false, + expPass: true, + }, + { + name: "successful update with next height and different validator set", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), bothValSet.Hash()) + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, 
bothValSet, bothValSet, bothSigners) + currentTime = suite.now + }, + expFrozen: false, + expPass: true, + }, + { + name: "successful update for a previous height", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + consStateHeight = heightMinus3 + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightMinus1.RevisionHeight), heightMinus3, suite.headerTime, bothValSet, suite.valSet, bothSigners) + currentTime = suite.now + }, + expFrozen: false, + expPass: true, + }, + { + name: "successful update for a previous revision", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainIDRevision1, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + consStateHeight = heightMinus3 + newHeader = chainADymint.CreateDMClientHeader(chainIDRevision0, int64(height.RevisionHeight), heightMinus3, suite.headerTime, bothValSet, suite.valSet, bothSigners) + currentTime = suite.now + }, + expPass: true, + }, + { + name: "successful update with identical header to a previous update", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, heightPlus1, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + ctx := suite.chainA.GetContext().WithBlockTime(currentTime) + // Store the header's consensus state in client store before UpdateClient call + suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, heightPlus1, newHeader.ConsensusState()) + }, + expFrozen: false, + expPass: true, + }, + { + name: "misbehaviour detection: header conflicts with existing consensus state", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, heightPlus1, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + ctx := suite.chainA.GetContext().WithBlockTime(currentTime) + // Change the consensus state of header and store in client store to create a conflict + conflictConsState := newHeader.ConsensusState() + conflictConsState.Root = commitmenttypes.NewMerkleRoot([]byte("conflicting apphash")) + suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, heightPlus1, conflictConsState) + }, + expFrozen: true, + expPass: true, + }, + { + name: "misbehaviour detection: previous consensus state time is not before header time. 
time monotonicity violation", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + // create an intermediate consensus state with the same time as the newHeader to create a time violation. + // header time is after client time + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + prevConsensusState := types.NewConsensusState(suite.headerTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + ctx := suite.chainA.GetContext().WithBlockTime(currentTime) + suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, heightPlus1, prevConsensusState) + clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, clientID) + types.SetIterationKey(clientStore, heightPlus1) + }, + expFrozen: true, + expPass: true, + }, + { + name: "misbehaviour detection: next consensus state time is not after header time. time monotonicity violation", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + // create the next consensus state with the same time as the intermediate newHeader to create a time violation. + // header time is after clientTime + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + nextConsensusState := types.NewConsensusState(suite.headerTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + ctx := suite.chainA.GetContext().WithBlockTime(currentTime) + suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, heightPlus5, nextConsensusState) + clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, clientID) + types.SetIterationKey(clientStore, heightPlus5) + }, + expFrozen: true, + expPass: true, + }, + { + name: "unsuccessful update with incorrect header chain-id", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader("ethermint", int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expFrozen: false, + expPass: false, + }, + { + name: "unsuccessful update to a future revision", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainIDRevision0, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainIDRevision1, 1, height, suite.headerTime, suite.valSet, 
suite.valSet, signers) + currentTime = suite.now + }, + expPass: false, + }, + { + name: "unsuccessful update: header height revision and trusted height revision mismatch", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainIDRevision1, trustingPeriod, maxClockDrift, clienttypes.NewHeight(1, 1), commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainIDRevision1, 3, height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expFrozen: false, + expPass: false, + }, + { + name: "unsuccessful update: trusting period has passed since last client timestamp", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + // make current time pass trusting period from last timestamp on clientstate + currentTime = suite.now.Add(trustingPeriod) + }, + expFrozen: false, + expPass: false, + }, + { + name: "unsuccessful update: header timestamp is past current timestamp", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expFrozen: false, + expPass: false, + }, + { + name: "unsuccessful update: header timestamp is not past last client timestamp", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.clientTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expFrozen: false, + expPass: false, + }, + { + name: "header basic validation failed", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + // cause new header to fail validatebasic by changing commit height to mismatch header height + newHeader.SignedHeader.Commit.Height = revisionHeight - 1 + currentTime = suite.now + }, + expFrozen: false, + expPass: false, + }, + { + name: "header height < consensus height", + setup: func(suite *DymintTestSuite) { + clientState = 
types.NewClientState(chainID, trustingPeriod, maxClockDrift, clienttypes.NewHeight(height.RevisionNumber, heightPlus5.RevisionHeight), commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + // Make new header at height less than latest client state + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightMinus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expFrozen: false, + expPass: false, + }, + { + name: "proposer is not in the validator set", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newValSet := suite.valSet + newValSet.Proposer = altVal + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expFrozen: false, + expPass: false, + }, + { + name: "wrong proposer address", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + newHeader.SignedHeader.Commit.Signatures[0].ValidatorAddress = altVal.Address + currentTime = suite.now + }, + expFrozen: false, + expPass: false, + }, + { + name: "wrong proposer signature", + setup: func(suite *DymintTestSuite) { + clientState = types.NewClientState(chainID, trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = chainADymint.CreateDMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + newHeader.SignedHeader.Commit.Signatures[0].Signature = []byte{123} + currentTime = suite.now + }, + expFrozen: false, + expPass: false, + }, + } + + for i, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case: %s", tc.name), func() { + suite.SetupTestWithConsensusType(exported.Dymint, exported.Tendermint) // reset + chainADymint = suite.chainA.TestChainClient.(*ibctesting.TestChainDymint) + + // Create bothValSet with both suite validator and altVal. 
Would be valid update + bothValSet = tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal)) + signers = []tmtypes.PrivValidator{suite.privVal} + + // Create signer array and ensure it is in same order as bothValSet + _, suiteVal := suite.valSet.GetByIndex(0) + bothSigners = ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal) + + consStateHeight = height // must be explicitly changed + // setup test + tc.setup(suite) + + // Set current timestamp in context + ctx := suite.chainA.GetContext().WithBlockTime(currentTime) + + // Set trusted consensus state in client store + suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, consStateHeight, consensusState) + + height := newHeader.GetHeight() + expectedConsensus := &types.ConsensusState{ + Timestamp: newHeader.GetTime(), + Root: commitmenttypes.NewMerkleRoot(newHeader.Header.GetAppHash()), + } + + newClientState, consensusState, err := clientState.CheckHeaderAndUpdateState( + ctx, + suite.cdc, + suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID), // pass in clientID prefixed clientStore + newHeader, + ) + + if tc.expPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + + suite.Require().Equal(tc.expFrozen, !newClientState.(*types.ClientState).FrozenHeight.IsZero(), "client state status is unexpected after update") + + // further writes only happen if update is not misbehaviour + if !tc.expFrozen { + // Determine if clientState should be updated or not + // TODO: check the entire Height struct once GetLatestHeight returns clienttypes.Height + if height.GT(clientState.LatestHeight) { + // Header Height is greater than clientState latest Height, clientState should be updated with header.GetHeight() + suite.Require().Equal(height, newClientState.GetLatestHeight(), "clientstate height did not update") + } else { + // Update will add past consensus state, clientState should not be updated at all + suite.Require().Equal(clientState.LatestHeight, newClientState.GetLatestHeight(), "client state height updated for past header") + } + + suite.Require().Equal(expectedConsensus, consensusState, "valid test case %d failed: %s", i, tc.name) + } + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + suite.Require().Nil(newClientState, "invalid test case %d passed: %s", i, tc.name) + suite.Require().Nil(consensusState, "invalid test case %d passed: %s", i, tc.name) + } + }) + } +} + +func (suite *DymintTestSuite) TestPruneConsensusState() { + // create path and setup clients + path := ibctesting.NewPath(suite.chainA, suite.chainB) + suite.coordinator.SetupClients(path) + + // get the first height as it will be pruned first. 
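+	// (the callback returns true immediately, so iteration stops at the lowest height in the store)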
+ var pruneHeight exported.Height + getFirstHeightCb := func(height exported.Height) bool { + pruneHeight = height + return true + } + ctx := path.EndpointA.Chain.GetContext() + clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) + err := types.IterateConsensusStateAscending(clientStore, getFirstHeightCb) + suite.Require().Nil(err) + + // this height will be expired but not pruned + path.EndpointA.UpdateClient() + expiredHeight := path.EndpointA.GetClientState().GetLatestHeight() + + // expected values that must still remain in store after pruning + expectedConsState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, expiredHeight) + suite.Require().True(ok) + ctx = path.EndpointA.Chain.GetContext() + clientStore = path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) + expectedProcessTime, ok := types.GetProcessedTime(clientStore, expiredHeight) + suite.Require().True(ok) + expectedProcessHeight, ok := types.GetProcessedHeight(clientStore, expiredHeight) + suite.Require().True(ok) + expectedConsKey := types.GetIterationKey(clientStore, expiredHeight) + suite.Require().NotNil(expectedConsKey) + + // Increment the time by a week + suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + + // create the consensus state that can be used as trusted height for next update + path.EndpointA.UpdateClient() + + // Increment the time by another week, then update the client. + // This will cause the first two consensus states to become expired. + suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + path.EndpointA.UpdateClient() + + ctx = path.EndpointA.Chain.GetContext() + clientStore = path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) + + // check that the first expired consensus state got deleted along with all associated metadata + consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) + suite.Require().Nil(consState, "expired consensus state not pruned") + suite.Require().False(ok) + // check processed time metadata is pruned + processTime, ok := types.GetProcessedTime(clientStore, pruneHeight) + suite.Require().Equal(uint64(0), processTime, "processed time metadata not pruned") + suite.Require().False(ok) + processHeight, ok := types.GetProcessedHeight(clientStore, pruneHeight) + suite.Require().Nil(processHeight, "processed height metadata not pruned") + suite.Require().False(ok) + + // check iteration key metadata is pruned + consKey := types.GetIterationKey(clientStore, pruneHeight) + suite.Require().Nil(consKey, "iteration key not pruned") + + // check that second expired consensus state doesn't get deleted + // this ensures that there is a cap on gas cost of UpdateClient + consState, ok = path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, expiredHeight) + suite.Require().Equal(expectedConsState, consState, "consensus state incorrectly pruned") + suite.Require().True(ok) + // check processed time metadata is not pruned + processTime, ok = types.GetProcessedTime(clientStore, expiredHeight) + suite.Require().Equal(expectedProcessTime, processTime, "processed time metadata incorrectly pruned") + suite.Require().True(ok) + + // check processed height metadata is not pruned + processHeight, ok = types.GetProcessedHeight(clientStore, expiredHeight) + suite.Require().Equal(expectedProcessHeight, processHeight, "processed height metadata incorrectly pruned") + suite.Require().True(ok) 
+
+	// check iteration key metadata is not pruned
+	consKey = types.GetIterationKey(clientStore, expiredHeight)
+	suite.Require().Equal(expectedConsKey, consKey, "iteration key incorrectly pruned")
+}
diff --git a/modules/light-clients/01-dymint/types/upgrade.go b/modules/light-clients/01-dymint/types/upgrade.go
new file mode 100644
index 00000000000..666cc4de465
--- /dev/null
+++ b/modules/light-clients/01-dymint/types/upgrade.go
@@ -0,0 +1,155 @@
+package types
+
+import (
+	"fmt"
+
+	"github.com/cosmos/cosmos-sdk/codec"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+	upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+
+	clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types"
+	commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types"
+	"github.com/cosmos/ibc-go/v5/modules/core/exported"
+)
+
+// VerifyUpgradeAndUpdateState checks if the upgraded client has been committed by the current client.
+// It will zero out all client-specific fields (e.g. TrustingPeriod) and verify all data
+// in client state that must be the same across all valid Dymint clients for the new chain.
+// VerifyUpgrade will return an error if:
+// - the upgradedClient is not a Dymint ClientState
+// - the latest height of the client state does not have the same revision number or has a greater
+// height than the committed client.
+// - the height of the upgraded client is not greater than that of the current client
+// - the latest height of the new client does not match or is greater than the height in the committed client
+// - any Dymint chain-specified parameter in the upgraded client such as ChainID,
+// and ProofSpecs do not match parameters set by the committed client
+func (cs ClientState) VerifyUpgradeAndUpdateState(
+	ctx sdk.Context, cdc codec.BinaryCodec, clientStore sdk.KVStore,
+	upgradedClient exported.ClientState, upgradedConsState exported.ConsensusState,
+	proofUpgradeClient, proofUpgradeConsState []byte,
+) (exported.ClientState, exported.ConsensusState, error) {
+	if len(cs.UpgradePath) == 0 {
+		return nil, nil, sdkerrors.Wrap(clienttypes.ErrInvalidUpgradeClient, "cannot upgrade client, no upgrade path set")
+	}
+
+	// last height of current counterparty chain must be client's latest height
+	lastHeight := cs.GetLatestHeight()
+
+	if !upgradedClient.GetLatestHeight().GT(lastHeight) {
+		return nil, nil, sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "upgraded client height %s must be greater than current client height %s",
+			upgradedClient.GetLatestHeight(), lastHeight)
+	}
+
+	// upgraded client state and consensus state must be IBC dymint client state and consensus state
+	// this may be modified in the future to upgrade to a new IBC dymint type
+	// counterparty must also commit to the upgraded consensus state at a sub-path under the upgrade path specified
+	tmUpgradeClient, ok := upgradedClient.(*ClientState)
+	if !ok {
+		return nil, nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClientType, "upgraded client must be Dymint client. expected: %T got: %T",
+			&ClientState{}, upgradedClient)
+	}
+	tmUpgradeConsState, ok := upgradedConsState.(*ConsensusState)
+	if !ok {
+		return nil, nil, sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "upgraded consensus state must be Dymint consensus state. 
expected %T, got: %T", + &ConsensusState{}, upgradedConsState) + } + + // unmarshal proofs + var merkleProofClient, merkleProofConsState commitmenttypes.MerkleProof + if err := cdc.Unmarshal(proofUpgradeClient, &merkleProofClient); err != nil { + return nil, nil, sdkerrors.Wrapf(commitmenttypes.ErrInvalidProof, "could not unmarshal client merkle proof: %v", err) + } + if err := cdc.Unmarshal(proofUpgradeConsState, &merkleProofConsState); err != nil { + return nil, nil, sdkerrors.Wrapf(commitmenttypes.ErrInvalidProof, "could not unmarshal consensus state merkle proof: %v", err) + } + + // Must prove against latest consensus state to ensure we are verifying against latest upgrade plan + // This verifies that upgrade is intended for the provided revision, since committed client must exist + // at this consensus state + consState, err := GetConsensusState(clientStore, cdc, lastHeight) + if err != nil { + return nil, nil, sdkerrors.Wrap(err, "could not retrieve consensus state for lastHeight") + } + + // Verify client proof + bz, err := cdc.MarshalInterface(upgradedClient) + if err != nil { + return nil, nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "could not marshal client state: %v", err) + } + // construct clientState Merkle path + upgradeClientPath := constructUpgradeClientMerklePath(cs.UpgradePath, lastHeight) + if err := merkleProofClient.VerifyMembership(cs.ProofSpecs, consState.GetRoot(), upgradeClientPath, bz); err != nil { + return nil, nil, sdkerrors.Wrapf(err, "client state proof failed. Path: %s", upgradeClientPath.Pretty()) + } + + // Verify consensus state proof + bz, err = cdc.MarshalInterface(upgradedConsState) + if err != nil { + return nil, nil, sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "could not marshal consensus state: %v", err) + } + // construct consensus state Merkle path + upgradeConsStatePath := constructUpgradeConsStateMerklePath(cs.UpgradePath, lastHeight) + if err := merkleProofConsState.VerifyMembership(cs.ProofSpecs, consState.GetRoot(), upgradeConsStatePath, bz); err != nil { + return nil, nil, sdkerrors.Wrapf(err, "consensus state proof failed. Path: %s", upgradeConsStatePath.Pretty()) + } + + // Construct new client state and consensus state + // Relayer chosen client parameters are ignored. + // All chain-chosen parameters come from committed client, all client-chosen parameters + // come from current client. + newClientState := NewClientState( + tmUpgradeClient.ChainId, cs.TrustingPeriod, + cs.MaxClockDrift, tmUpgradeClient.LatestHeight, tmUpgradeClient.ProofSpecs, tmUpgradeClient.UpgradePath, + ) + + if err := newClientState.Validate(); err != nil { + return nil, nil, sdkerrors.Wrap(err, "updated client state failed basic validation") + } + + // The new consensus state is merely used as a trusted kernel against which headers on the new + // chain can be verified. The root is just a stand-in sentinel value as it cannot be known in advance, thus no proof verification will pass. + // The timestamp of the consensus state is the blocktime + // of the last block committed by the old chain. This will allow the first block of the new chain to be verified against + // the last validators of the old chain so long as it is submitted within the TrustingPeriod of this client. + // NOTE: We do not set processed time for this consensus state since this consensus state should not be used for packet verification + // as the root is empty. The next consensus state submitted using update will be usable for packet-verification. 
+	newConsState := NewConsensusState(
+		tmUpgradeConsState.Timestamp, commitmenttypes.NewMerkleRoot([]byte(SentinelRoot)), tmUpgradeConsState.NextValidatorsHash,
+	)
+
+	// set metadata for this consensus state
+	setConsensusMetadata(ctx, clientStore, tmUpgradeClient.LatestHeight)
+
+	return newClientState, newConsState, nil
+}
+
+// construct MerklePath for the committed client from upgradePath
+func constructUpgradeClientMerklePath(upgradePath []string, lastHeight exported.Height) commitmenttypes.MerklePath {
+	// copy all elements from upgradePath except final element
+	clientPath := make([]string, len(upgradePath)-1)
+	copy(clientPath, upgradePath)
+
+	// append lastHeight and `upgradedClient` to last key of upgradePath and use as lastKey of clientPath
+	// this will create the IAVL key that is used to store the client in the upgrade store
+	lastKey := upgradePath[len(upgradePath)-1]
+	appendedKey := fmt.Sprintf("%s/%d/%s", lastKey, lastHeight.GetRevisionHeight(), upgradetypes.KeyUpgradedClient)
+
+	clientPath = append(clientPath, appendedKey)
+	return commitmenttypes.NewMerklePath(clientPath...)
+}
+
+// construct MerklePath for the committed consensus state from upgradePath
+func constructUpgradeConsStateMerklePath(upgradePath []string, lastHeight exported.Height) commitmenttypes.MerklePath {
+	// copy all elements from upgradePath except final element
+	consPath := make([]string, len(upgradePath)-1)
+	copy(consPath, upgradePath)
+
+	// append lastHeight and `upgradedConsState` to last key of upgradePath and use as lastKey of consPath
+	// this will create the IAVL key that is used to store the consensus state in the upgrade store
+	lastKey := upgradePath[len(upgradePath)-1]
+	appendedKey := fmt.Sprintf("%s/%d/%s", lastKey, lastHeight.GetRevisionHeight(), upgradetypes.KeyUpgradedConsState)
+
+	consPath = append(consPath, appendedKey)
+	return commitmenttypes.NewMerklePath(consPath...)
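+	// Illustration (editorial note, assuming the default SDK upgrade path
+	// []string{"upgrade", "upgradedIBCState"} and an upgrade at revision height 150):
+	// constructUpgradeClientMerklePath returns MerklePath("upgrade", "upgradedIBCState/150/upgradedClient")
+	// and this function returns MerklePath("upgrade", "upgradedIBCState/150/upgradedConsState"),
+	// matching the keys under which the x/upgrade module commits the upgraded client and
+	// consensus state via SetUpgradedClient and SetUpgradedConsensusState.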
+} diff --git a/modules/light-clients/01-dymint/types/upgrade_test.go b/modules/light-clients/01-dymint/types/upgrade_test.go new file mode 100644 index 00000000000..6261f5aa19b --- /dev/null +++ b/modules/light-clients/01-dymint/types/upgrade_test.go @@ -0,0 +1,493 @@ +package types_test + +import ( + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" + "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types" + ibctesting "github.com/cosmos/ibc-go/v5/testing" +) + +var newChainId = "newChainId-1" + +func (suite *DymintTestSuite) TestVerifyUpgrade() { + var ( + upgradedClient exported.ClientState + upgradedConsState exported.ConsensusState + lastHeight clienttypes.Height + path *ibctesting.Path + proofUpgradedClient, proofUpgradedConsState []byte + upgradedClientBz, upgradedConsStateBz []byte + err error + dymintCounterpartyChain, dymintChain *ibctesting.TestChain + endpoint *ibctesting.Endpoint + ) + + testCases := []struct { + name string + setup func() + expPass bool + }{ + { + name: "successful upgrade", + setup: func() { + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(dymintChain) + err := endpoint.UpdateClient() + suite.Require().NoError(err) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: true, + }, + { + name: "successful upgrade to same revision", + setup: func() { + upgradedHeight := clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+2)) + // don't use -1 suffix in chain id + upgradedClient = types.NewClientState("newChainId", trustingPeriod, maxClockDrift, upgradedHeight, commitmenttypes.GetSDKSpecs(), upgradePath) + upgradedClient = upgradedClient.ZeroCustomFields() + upgradedClientBz, err = clienttypes.MarshalClientState(dymintCounterpartyChain.App.AppCodec(), upgradedClient) + suite.Require().NoError(err) + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) + + // commit upgrade store changes and update clients + + 
suite.coordinator.CommitBlock(dymintChain) + err := endpoint.UpdateClient() + suite.Require().NoError(err) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: true, + }, + + { + name: "unsuccessful upgrade: upgrade height revision height is more than the current client revision height", + setup: func() { + // upgrade Height is 10 blocks from now + lastHeight = clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+10)) + + // zero custom fields and store in upgrade store + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(dymintChain) + err := endpoint.UpdateClient() + suite.Require().NoError(err) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: committed client does not have zeroed custom fields", + setup: func() { + // non-zeroed upgrade client + upgradedClient = types.NewClientState(newChainId, trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath) + upgradedClientBz, err = clienttypes.MarshalClientState(dymintCounterpartyChain.App.AppCodec(), upgradedClient) + suite.Require().NoError(err) + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(dymintChain) + err := endpoint.UpdateClient() + suite.Require().NoError(err) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), 
cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: chain-specified parameters do not match committed client", + setup: func() { + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) + + // change upgradedClient client-specified parameters + upgradedClient = types.NewClientState("wrongchainID", trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath) + + suite.coordinator.CommitBlock(dymintChain) + err := endpoint.UpdateClient() + suite.Require().NoError(err) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: client-specified parameters do not match previous client", + setup: func() { + // zero custom fields and store in upgrade store + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) + + // change upgradedClient client-specified parameters + upgradedClient = types.NewClientState(newChainId, ubdPeriod+trustingPeriod, maxClockDrift+5, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath) + + suite.coordinator.CommitBlock(dymintChain) + err := endpoint.UpdateClient() + suite.Require().NoError(err) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: relayer-submitted consensus state does not match counterparty-committed consensus state", + setup: func() { + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) + + // change submitted upgradedConsensusState + upgradedConsState = 
&types.ConsensusState{ + NextValidatorsHash: []byte("maliciousValidators"), + } + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(dymintChain) + err := endpoint.UpdateClient() + suite.Require().NoError(err) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: client proof unmarshal failed", + setup: func() { + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + + proofUpgradedClient = []byte("proof") + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: consensus state proof unmarshal failed", + setup: func() { + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + + proofUpgradedConsState = []byte("proof") + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: client proof verification failed", + setup: func() { + // do not store upgraded client + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+1)) + + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: consensus state proof verification failed", + setup: func() { + // do not store upgraded client + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+1)) + + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + + cs, found := 
dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: upgrade path is empty", + setup: func() { + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(dymintChain) + err := endpoint.UpdateClient() + suite.Require().NoError(err) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + + // SetClientState with empty upgrade path + tmClient, _ := cs.(*types.ClientState) + tmClient.UpgradePath = []string{""} + dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.SetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID, tmClient) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: upgraded height is not greater than current height", + setup: func() { + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(dymintChain) + err := endpoint.UpdateClient() + suite.Require().NoError(err) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: consensus state for upgrade height cannot be found", + setup: func() { + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+100)) + + // zero custom fields and store in upgrade store + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + + // commit upgrade store changes and update clients + + 
suite.coordinator.CommitBlock(dymintChain) + err := endpoint.UpdateClient() + suite.Require().NoError(err) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: client is expired", + setup: func() { + // zero custom fields and store in upgrade store + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(dymintChain) + err := endpoint.UpdateClient() + suite.Require().NoError(err) + + // expire chainB's client + dymintCounterpartyChain.ExpireClient(ubdPeriod) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: updated unbonding period is equal to trusting period", + setup: func() { + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(dymintChain) + err := endpoint.UpdateClient() + suite.Require().NoError(err) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: final client is not valid", + setup: func() { + // new client has smaller unbonding period such that old trusting period is no longer valid + upgradedClient = types.NewClientState(newChainId, trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath) + upgradedClientBz, err = clienttypes.MarshalClientState(dymintCounterpartyChain.App.AppCodec(), upgradedClient) + suite.Require().NoError(err) + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(dymintChain.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + 
dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedClient(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + dymintChain.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(dymintChain.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(dymintChain) + err := endpoint.UpdateClient() + suite.Require().NoError(err) + + cs, found := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.GetClientState(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + suite.Require().True(found) + + proofUpgradedClient, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = dymintChain.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + } + + for _, tc := range testCases { + tc := tc + + // reset suite + suite.SetupTest() + path = ibctesting.NewPath(suite.chainA, suite.chainB) + + suite.coordinator.SetupClients(path) + + if suite.chainA.TestChainClient.GetSelfClientType() == exported.Dymint { + dymintChain = suite.chainA + dymintCounterpartyChain = suite.chainB + endpoint = path.EndpointB + } else { + dymintChain = suite.chainB + dymintCounterpartyChain = suite.chainA + endpoint = path.EndpointA + } + + upgradedClient = types.NewClientState(newChainId, trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath) + upgradedClient = upgradedClient.ZeroCustomFields() + upgradedClientBz, err = clienttypes.MarshalClientState(dymintCounterpartyChain.App.AppCodec(), upgradedClient) + suite.Require().NoError(err) + + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + upgradedConsStateBz, err = clienttypes.MarshalConsensusState(dymintCounterpartyChain.App.AppCodec(), upgradedConsState) + suite.Require().NoError(err) + + tc.setup() + + cs := dymintCounterpartyChain.GetClientState(endpoint.ClientID) + clientStore := dymintCounterpartyChain.App.GetIBCKeeper().ClientKeeper.ClientStore(dymintCounterpartyChain.GetContext(), endpoint.ClientID) + + // Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient + upgradedClient = upgradedClient.ZeroCustomFields() + + clientState, consensusState, err := cs.VerifyUpgradeAndUpdateState( + dymintCounterpartyChain.GetContext(), + suite.cdc, + clientStore, + upgradedClient, + upgradedConsState, + proofUpgradedClient, + proofUpgradedConsState, + ) + + if tc.expPass { + suite.Require().NoError(err, "verify upgrade failed on valid case: %s", tc.name) + suite.Require().NotNil(clientState, "verify upgrade failed on valid case: %s", tc.name) + suite.Require().NotNil(consensusState, "verify upgrade failed on valid case: %s", tc.name) + } else { + suite.Require().Error(err, "verify upgrade passed on invalid case: %s", tc.name) + suite.Require().Nil(clientState, "verify upgrade passed on invalid case: %s", tc.name) + + suite.Require().Nil(consensusState, "verify upgrade passed on invalid case: %s", tc.name) + + } + } +} diff --git a/modules/light-clients/07-tendermint/types/client_state.go b/modules/light-clients/07-tendermint/types/client_state.go index 8db1903e76a..e393a8fba9e 100644 --- a/modules/light-clients/07-tendermint/types/client_state.go +++ 
b/modules/light-clients/07-tendermint/types/client_state.go @@ -216,10 +216,10 @@ func (cs ClientState) VerifyClientState( return sdkerrors.Wrap(clienttypes.ErrInvalidClient, "client state cannot be empty") } - _, ok := clientState.(*ClientState) - if !ok { - return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "invalid client type %T, expected %T", clientState, &ClientState{}) - } + // _, ok := clientState.(*ClientState) + // if !ok { + // return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "invalid client type %T, expected %T", clientState, &ClientState{}) + // } bz, err := cdc.MarshalInterface(clientState) if err != nil { @@ -256,10 +256,10 @@ func (cs ClientState) VerifyClientConsensusState( return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "consensus state cannot be empty") } - _, ok := consensusState.(*ConsensusState) - if !ok { - return sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "invalid consensus type %T, expected %T", consensusState, &ConsensusState{}) - } + // _, ok := consensusState.(*ConsensusState) + // if !ok { + // return sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "invalid consensus type %T, expected %T", consensusState, &ConsensusState{}) + // } bz, err := cdc.MarshalInterface(consensusState) if err != nil { diff --git a/modules/light-clients/07-tendermint/types/self_client.go b/modules/light-clients/07-tendermint/types/self_client.go new file mode 100644 index 00000000000..b1a0b86f222 --- /dev/null +++ b/modules/light-clients/07-tendermint/types/self_client.go @@ -0,0 +1,113 @@ +package types + +import ( + "reflect" + "time" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/tendermint/tendermint/light" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types" + "github.com/cosmos/ibc-go/v5/modules/core/exported" +) + +var _ exported.SelfClient = (*SelfClient)(nil) + +type SelfClient struct{} + +// NewClientState creates a new ClientState instance +func NewSelfClient() exported.SelfClient { + return &SelfClient{} +} + +// ValidateSelfClientState validates the client parameters for a client of the running chain +// This function is only used to validate the client state the counterparty stores for this chain +// Client must be in same revision as the executing chain +func (sc SelfClient) ValidateSelfClientState( + ctx sdk.Context, + expectedUbdPeriod time.Duration, + clientState exported.ClientState, +) error { + tmClient, ok := clientState.(*ClientState) + if !ok { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "client must be a Tendermint client, expected: %T, got: %T", + &ClientState{}, tmClient) + } + + if !tmClient.FrozenHeight.IsZero() { + return clienttypes.ErrClientFrozen + } + + if ctx.ChainID() != tmClient.ChainId { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "invalid chain-id. expected: %s, got: %s", + ctx.ChainID(), tmClient.ChainId) + } + + revision := clienttypes.ParseChainID(ctx.ChainID()) + + // client must be in the same revision as executing chain + if tmClient.LatestHeight.RevisionNumber != revision { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "client is not in the same revision as the chain. 
expected revision: %d, got: %d", + tmClient.LatestHeight.RevisionNumber, revision) + } + + selfHeight := clienttypes.NewHeight(revision, uint64(ctx.BlockHeight())) + if tmClient.LatestHeight.GTE(selfHeight) { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "client has LatestHeight %d greater than or equal to chain height %d", + tmClient.LatestHeight, selfHeight) + } + + expectedProofSpecs := commitmenttypes.GetSDKSpecs() + if !reflect.DeepEqual(expectedProofSpecs, tmClient.ProofSpecs) { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "client has invalid proof specs. expected: %v got: %v", + expectedProofSpecs, tmClient.ProofSpecs) + } + + if err := light.ValidateTrustLevel(tmClient.TrustLevel.ToTendermint()); err != nil { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "trust-level invalid: %v", err) + } + + if expectedUbdPeriod != tmClient.UnbondingPeriod { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "invalid unbonding period. expected: %s, got: %s", + expectedUbdPeriod, tmClient.UnbondingPeriod) + } + + if tmClient.UnbondingPeriod < tmClient.TrustingPeriod { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "unbonding period must be greater than trusting period. unbonding period (%d) < trusting period (%d)", + tmClient.UnbondingPeriod, tmClient.TrustingPeriod) + } + + if len(tmClient.UpgradePath) != 0 { + // For now, SDK IBC implementation assumes that upgrade path (if defined) is defined by SDK upgrade module + expectedUpgradePath := []string{upgradetypes.StoreKey, upgradetypes.KeyUpgradedIBCState} + if !reflect.DeepEqual(expectedUpgradePath, tmClient.UpgradePath) { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "upgrade path must be the upgrade path defined by upgrade module. expected %v, got %v", + expectedUpgradePath, tmClient.UpgradePath) + } + } + return nil +} + +func (sc SelfClient) GetSelfConsensusStateFromBlocHeader( + cdc codec.BinaryCodec, + blockHeader []byte, +) (exported.ConsensusState, error) { + // unmarshal block header + tmBlockHeader := &tmproto.Header{} + if err := cdc.Unmarshal(blockHeader, tmBlockHeader); err != nil { + return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidHeader, "could not unmarshal block header: %v", err) + } + return NewConsensusState(tmBlockHeader.Time, + commitmenttypes.NewMerkleRoot(tmBlockHeader.GetAppHash()), + tmBlockHeader.NextValidatorsHash), nil +} + +func (sc SelfClient) ClientType() string { + return exported.Tendermint +} diff --git a/proto/ibc/lightclients/dymint/dymint.proto b/proto/ibc/lightclients/dymint/dymint.proto new file mode 100755 index 00000000000..1702083b755 --- /dev/null +++ b/proto/ibc/lightclients/dymint/dymint.proto @@ -0,0 +1,108 @@ +syntax = "proto3"; + +package ibc.lightclients.dymint; + +option go_package = "github.com/cosmos/ibc-go/v3/modules/light-clients/01-dymint/types"; + +import "tendermint/types/validator.proto"; +import "tendermint/types/types.proto"; +import "proofs.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "ibc/core/client/v1/client.proto"; +import "ibc/core/commitment/v1/commitment.proto"; +import "gogoproto/gogo.proto"; + +// ClientState from Dymint tracks the current validator set, latest height, +// and a possible frozen height. 
+message ClientState { + option (gogoproto.goproto_getters) = false; + + string chain_id = 1; + Fraction trust_level = 2 [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"trust_level\""]; + // duration of the period since the LastestTimestamp during which the + // submitted headers are valid for upgrade + google.protobuf.Duration trusting_period = 3 + [(gogoproto.nullable) = false, (gogoproto.stdduration) = true, (gogoproto.moretags) = "yaml:\"trusting_period\""]; + // duration of the staking unbonding period + google.protobuf.Duration unbonding_period = 4 [ + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true, + (gogoproto.moretags) = "yaml:\"unbonding_period\"" + ]; + // defines how much new (untrusted) header's Time can drift into the future. + google.protobuf.Duration max_clock_drift = 5 + [(gogoproto.nullable) = false, (gogoproto.stdduration) = true, (gogoproto.moretags) = "yaml:\"max_clock_drift\""]; + // Block height when the client was frozen due to a misbehaviour + ibc.core.client.v1.Height frozen_height = 6 + [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"frozen_height\""]; + // Latest height the client was updated to + ibc.core.client.v1.Height latest_height = 7 + [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"latest_height\""]; + + // Proof specifications used in verifying counterparty state + repeated ics23.ProofSpec proof_specs = 8 [(gogoproto.moretags) = "yaml:\"proof_specs\""]; + + // Path at which next upgraded client will be committed. + // Each element corresponds to the key for a single CommitmentProof in the + // chained proof. NOTE: ClientState must stored under + // `{upgradePath}/{upgradeHeight}/clientState` ConsensusState must be stored + // under `{upgradepath}/{upgradeHeight}/consensusState` For SDK chains using + // the default upgrade module, upgrade_path should be []string{"upgrade", + // "upgradedIBCState"}` + repeated string upgrade_path = 9 [(gogoproto.moretags) = "yaml:\"upgrade_path\""]; +} + +// ConsensusState defines the consensus state from Dymint. +message ConsensusState { + option (gogoproto.goproto_getters) = false; + + // timestamp that corresponds to the block height in which the ConsensusState + // was stored. + google.protobuf.Timestamp timestamp = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + // commitment root (i.e app hash) + ibc.core.commitment.v1.MerkleRoot root = 2 [(gogoproto.nullable) = false]; + bytes next_validators_hash = 3 [ + (gogoproto.casttype) = "github.com/tendermint/tendermint/libs/bytes.HexBytes", + (gogoproto.moretags) = "yaml:\"next_validators_hash\"" + ]; +} + +// Misbehaviour is a wrapper over two conflicting Headers +// that implements Misbehaviour interface expected by ICS-02 +message Misbehaviour { + option (gogoproto.goproto_getters) = false; + + string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""]; + Header header_1 = 2 [(gogoproto.customname) = "Header1", (gogoproto.moretags) = "yaml:\"header_1\""]; + Header header_2 = 3 [(gogoproto.customname) = "Header2", (gogoproto.moretags) = "yaml:\"header_2\""]; +} + +// Header defines the Dymint client consensus Header. +// It encapsulates all the information necessary to update from a trusted +// Dymint ConsensusState. 
The inclusion of TrustedHeight and +// TrustedValidators allows this update to process correctly, so long as the +// ConsensusState for the TrustedHeight exists, this removes race conditions +// among relayers The SignedHeader and ValidatorSet are the new untrusted update +// fields for the client. The TrustedHeight is the height of a stored +// ConsensusState on the client that will be used to verify the new untrusted +// header. The Trusted ConsensusState must be within the unbonding period of +// current time in order to correctly verify, and the TrustedValidators must +// hash to TrustedConsensusState.NextValidatorsHash since that is the last +// trusted validator set at the TrustedHeight. +message Header { + .tendermint.types.SignedHeader signed_header = 1 + [(gogoproto.embed) = true, (gogoproto.moretags) = "yaml:\"signed_header\""]; + + .tendermint.types.ValidatorSet validator_set = 2 [(gogoproto.moretags) = "yaml:\"validator_set\""]; + ibc.core.client.v1.Height trusted_height = 3 + [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"trusted_height\""]; + .tendermint.types.ValidatorSet trusted_validators = 4 [(gogoproto.moretags) = "yaml:\"trusted_validators\""]; +} + +// Fraction defines the protobuf message type for tmmath.Fraction that only +// supports positive values. +message Fraction { + uint64 numerator = 1; + uint64 denominator = 2; +} diff --git a/testing/app.go b/testing/app.go index 2c41021f504..07070b6d08e 100644 --- a/testing/app.go +++ b/testing/app.go @@ -49,6 +49,13 @@ type TestingApp interface { LastBlockHeight() int64 } +func SetupTestingAppWithDymint() (TestingApp, map[string]json.RawMessage) { + db := dbm.NewMemDB() + encCdc := simapp.MakeTestEncodingConfig() + app := simapp.NewSimAppWithDymint(log.NewNopLogger(), db, nil, true, map[int64]bool{}, simapp.DefaultNodeHome, 5, encCdc, simapp.EmptyAppOptions{}) + return app, simapp.NewDefaultGenesisState(encCdc.Marshaler) +} + func SetupTestingApp() (TestingApp, map[string]json.RawMessage) { db := dbm.NewMemDB() encCdc := simapp.MakeTestEncodingConfig() diff --git a/testing/chain.go b/testing/chain.go index 1149224c855..9b5eca4840c 100644 --- a/testing/chain.go +++ b/testing/chain.go @@ -42,6 +42,32 @@ type SenderAccount struct { SenderAccount authtypes.AccountI } +type TestChainClientI interface { + GetContext() sdk.Context + NextBlock() + BeginBlock() + // UpdateCurrentHeaderTime(t time.Time) + // ClientConfigToState(ClientConfig ClientConfig) exported.ClientState + GetConsensusState() exported.ConsensusState + // NewConfig() ClientConfig + GetSelfClientType() string + // GetLastHeader() interface{} +} + +// func NewTestChainClient(chain *TestChain, chainConsensusType string) TestChainClientI { +// // set the last header to the current header +// // use nil trusted fields +// switch chainConsensusType { +// // case exported.Tendermint: +// // return NewChainTendermintClient(chain) +// case exported.Dymint: +// return NewChainDymintClient(chain) +// default: +// panic(fmt.Sprintf("client type %s is not supported", chainConsensusType)) +// } + +// } + // TestChain is a testing struct that wraps a simapp with the last TM Header, the current ABCI // header and the validators of the TestChain. It also contains a field called ChainID. This // is the clientID that *other* chains use to refer to this TestChain. 
The SenderAccount
@@ -74,6 +100,8 @@ type TestChain struct {
 	SenderAccount authtypes.AccountI
 
 	SenderAccounts []SenderAccount
+
+	TestChainClient TestChainClientI
 }
 
 // NewTestChainWithValSet initializes a new TestChain instance with the given validator set
@@ -92,6 +120,10 @@ type TestChain struct {
 // CONTRACT: Validator array must be provided in the order expected by Tendermint.
 // i.e. sorted first by power and then lexicographically by address.
 func NewTestChainWithValSet(t *testing.T, coord *Coordinator, chainID string, valSet *tmtypes.ValidatorSet, signers map[string]tmtypes.PrivValidator) *TestChain {
+	return newTestChainWithValSet(t, coord, chainID, valSet, signers, exported.Tendermint)
+}
+
+func newTestChainWithValSet(t *testing.T, coord *Coordinator, chainID string, valSet *tmtypes.ValidatorSet, signers map[string]tmtypes.PrivValidator, consensusType string) *TestChain {
 	genAccs := []authtypes.GenesisAccount{}
 	genBals := []banktypes.Balance{}
 	senderAccs := []SenderAccount{}
@@ -120,6 +152,9 @@ func NewTestChainWithValSet(t *testing.T, coord *Coordinator, chainID string, va
 		senderAccs = append(senderAccs, senderAcc)
 	}
 
+	if consensusType == exported.Dymint {
+		DefaultTestingAppInit = SetupTestingAppWithDymint
+	}
 	app := SetupWithGenesisValSet(t, valSet, genAccs, chainID, sdk.DefaultPowerReduction, genBals...)
 
 	// create current header and call begin block
@@ -180,6 +215,32 @@ func NewTestChain(t *testing.T, coord *Coordinator, chainID string) *TestChain {
 	return NewTestChainWithValSet(t, coord, chainID, valSet, signersByAddress)
 }
 
+// NewDymintTestChain initializes a new Dymint test chain with a single validator acting as sequencer.
+// Use this function if the tests do not need custom control over the validator set.
+func NewDymintTestChain(t *testing.T, coord *Coordinator, chainID string) *TestChain {
+	// generate validators private/public key
+	var (
+		validatorsPerChain = 1
+		validators         []*tmtypes.Validator
+		signersByAddress   = make(map[string]tmtypes.PrivValidator, validatorsPerChain)
+	)
+
+	for i := 0; i < validatorsPerChain; i++ {
+		privVal := mock.NewPV()
+		pubKey, err := privVal.GetPubKey()
+		require.NoError(t, err)
+		validators = append(validators, tmtypes.NewValidator(pubKey, 1))
+		signersByAddress[pubKey.Address().String()] = privVal
+	}
+
+	// construct validator set;
+	// Note that the validators are sorted by voting power
+	// or, if equal, by address lexical order
+	valSet := tmtypes.NewValidatorSet(validators)
+
+	return newTestChainWithValSet(t, coord, chainID, valSet, signersByAddress, exported.Dymint)
+}
+
 // GetContext returns the current context for the application.
 func (chain *TestChain) GetContext() sdk.Context {
 	return chain.App.GetBaseApp().NewContext(false, chain.CurrentHeader)
@@ -385,6 +446,10 @@ func (chain *TestChain) GetPrefix() commitmenttypes.MerklePrefix {
 	return commitmenttypes.NewMerklePrefix(chain.App.GetIBCKeeper().ConnectionKeeper.GetCommitmentPrefix().Bytes())
 }
 
+func (chain *TestChain) GetSelfClientType() string {
+	return exported.Tendermint
+}
+
 // ConstructUpdateTMClientHeader will construct a valid 07-tendermint Header to update the
 // light client on the source chain.
func (chain *TestChain) ConstructUpdateTMClientHeader(counterparty *TestChain, clientID string) (*ibctmtypes.Header, error) {
diff --git a/testing/chain_dymint.go b/testing/chain_dymint.go
new file mode 100644
index 00000000000..4234a0c0952
--- /dev/null
+++ b/testing/chain_dymint.go
@@ -0,0 +1,255 @@
+package ibctesting
+
+import (
+	"time"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/stretchr/testify/require"
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+	tmprotoversion "github.com/tendermint/tendermint/proto/tendermint/version"
+	tmtypes "github.com/tendermint/tendermint/types"
+	tmversion "github.com/tendermint/tendermint/version"
+
+	clienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types"
+	commitmenttypes "github.com/cosmos/ibc-go/v5/modules/core/23-commitment/types"
+	"github.com/cosmos/ibc-go/v5/modules/core/exported"
+	ibcdmtypes "github.com/cosmos/ibc-go/v5/modules/light-clients/01-dymint/types"
+	"github.com/cosmos/ibc-go/v5/testing/mock"
+)
+
+type DymintConfig struct {
+	TrustingPeriod time.Duration
+	MaxClockDrift  time.Duration
+}
+
+func (tmcfg *DymintConfig) GetClientType() string {
+	return exported.Dymint
+}
+
+var _ ClientConfig = &DymintConfig{}
+
+// TestChainDymint is a testing struct that 'wraps' a TestChain with the last DM Header and
+// the current ABCI header.
+type TestChainDymint struct {
+	TC *TestChain
+
+	LastHeader    *ibcdmtypes.Header // header for last block height committed
+	CurrentHeader tmproto.Header     // header for current block height
+}
+
+var _ TestChainClientI = &TestChainDymint{}
+
+// NewChainDymintClient initializes the consensus-specific part of the TestChain
+func NewChainDymintClient(tc *TestChain) *TestChainDymint {
+	// create current header and call begin block
+	header := tmproto.Header{
+		ChainID: tc.ChainID,
+		Height:  1,
+		Time:    tc.Coordinator.CurrentTime.UTC(),
+	}
+
+	// wrap the TestChain together with the current Dymint header
+	chain := &TestChainDymint{
+		tc,
+		nil,
+		header,
+	}
+
+	return chain
+}
+
+func (chain *TestChainDymint) GetSelfClientType() string {
+	return exported.Dymint
+}
+
+func (chain *TestChainDymint) NewConfig() ClientConfig {
+	return &DymintConfig{
+		TrustingPeriod: TrustingPeriod,
+		MaxClockDrift:  MaxClockDrift,
+	}
+}
+
+// GetContext returns the current context for the application.
+func (chain *TestChainDymint) GetContext() sdk.Context {
+	return chain.TC.App.GetBaseApp().NewContext(false, chain.CurrentHeader)
+}
+
+// NextBlock sets the last header to the current header and increments the current header to be
+// at the next block height. It does not update the time as that is handled by the Coordinator.
+//
+// CONTRACT: this function must only be called after app.Commit() occurs
+func (chain *TestChainDymint) NextBlock() {
+	// set the last header to the current header
+	// use nil trusted fields
+	chain.LastHeader = chain.CurrentDMClientHeader()
+
+	// increment the current header
+	chain.CurrentHeader = tmproto.Header{
+		ChainID: chain.TC.ChainID,
+		Height:  chain.TC.App.LastBlockHeight() + 1,
+		AppHash: chain.TC.App.LastCommitID().Hash,
+		// NOTE: the time is increased by the coordinator to maintain time synchrony amongst
+		// chains.
+ Time: chain.CurrentHeader.Time, + ValidatorsHash: chain.TC.Vals.Hash(), + NextValidatorsHash: chain.TC.Vals.Hash(), + } + + chain.BeginBlock() +} + +// // ConstructUpdateDMClientHeader will construct a valid 01-dymint Header to update the +// // light client on the source chain. +// func ConstructUpdateDMClientHeaderWithTrustedHeight(counterparty *TestChain, clientID string, trustedHeight clienttypes.Height) (*ibcdmtypes.Header, error) { +// header := counterparty.TestChainClient.GetLastHeader().(*ibcdmtypes.Header) + +// var ( +// tmTrustedVals *tmtypes.ValidatorSet +// ok bool +// ) +// // Once we get TrustedHeight from client, we must query the validators from the counterparty chain +// // If the LatestHeight == LastHeader.Height, then TrustedValidators are current validators +// // If LatestHeight < LastHeader.Height, we can query the historical validator set from HistoricalInfo +// if trustedHeight == header.GetHeight() { +// tmTrustedVals = counterparty.Vals +// } else { +// // NOTE: We need to get validators from counterparty at height: trustedHeight+1 +// // since the last trusted validators for a header at height h +// // is the NextValidators at h+1 committed to in header h by +// // NextValidatorsHash +// tmTrustedVals, ok = counterparty.GetValsAtHeight(int64(trustedHeight.RevisionHeight + 1)) +// if !ok { +// return nil, sdkerrors.Wrapf(ibcdmtypes.ErrInvalidHeaderHeight, "could not retrieve trusted validators at trustedHeight: %d", trustedHeight) +// } +// } +// // inject trusted fields into last header +// // for now assume revision number is 0 +// header.TrustedHeight = trustedHeight + +// trustedVals, err := tmTrustedVals.ToProto() +// if err != nil { +// return nil, err +// } +// header.TrustedValidators = trustedVals + +// return header, nil +// } + +// CurrentDMClientHeader creates a DM header using the current header parameters +// on the chain. The trusted fields in the header are set to nil. +func (chain *TestChainDymint) CurrentDMClientHeader() *ibcdmtypes.Header { + return chain.CreateDMClientHeader(chain.TC.ChainID, chain.CurrentHeader.Height, clienttypes.Height{}, chain.CurrentHeader.Time, chain.TC.Vals, nil, chain.TC.Signers) +} + +// CreateDMClientHeader creates a DM header to update the DM client. Args are passed in to allow +// caller flexibility to use params that differ from the chain. 
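+// Note (editorial, inferred from the implementation below): the commit is finalized by a single
+// signer acting as the Dymint sequencer; its signature over the marshaled header bytes is written
+// into the first commit signature, since the Dymint light client checks signatures over the raw
+// header bytes.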
+func (chain *TestChainDymint) CreateDMClientHeader(chainID string, blockHeight int64, trustedHeight clienttypes.Height, timestamp time.Time, tmValSet, tmTrustedVals *tmtypes.ValidatorSet, signers map[string]tmtypes.PrivValidator) *ibcdmtypes.Header { + var ( + valSet *tmproto.ValidatorSet + trustedVals *tmproto.ValidatorSet + ) + require.NotNil(chain.TC.T, tmValSet) + + vsetHash := tmValSet.Hash() + + tmHeader := tmtypes.Header{ + Version: tmprotoversion.Consensus{Block: tmversion.BlockProtocol, App: 2}, + ChainID: chainID, + Height: blockHeight, + Time: timestamp, + LastBlockID: MakeBlockID(make([]byte, tmhash.Size), 10_000, make([]byte, tmhash.Size)), + LastCommitHash: chain.TC.App.LastCommitID().Hash, + DataHash: tmhash.Sum([]byte("data_hash")), + ValidatorsHash: vsetHash, + NextValidatorsHash: vsetHash, + ConsensusHash: tmhash.Sum([]byte("consensus_hash")), + AppHash: chain.CurrentHeader.AppHash, + LastResultsHash: tmhash.Sum([]byte("last_results_hash")), + EvidenceHash: tmhash.Sum([]byte("evidence_hash")), + ProposerAddress: tmValSet.Proposer.Address, //nolint:staticcheck + } + + hhash := tmHeader.Hash() + blockID := MakeBlockID(hhash, 3, tmhash.Sum([]byte("part_set"))) + voteSet := tmtypes.NewVoteSet(chainID, blockHeight, 1, tmproto.PrecommitType, tmValSet) + + // MakeCommit expects a signer array in the same order as the validator array. + // Thus we iterate over the ordered validator set and construct a signer array + // from the signer map in the same order. + var signerArr []tmtypes.PrivValidator //nolint:prealloc // using prealloc here would be needlessly complex + for _, v := range tmValSet.Validators { //nolint:staticcheck // need to check for nil validator set + signerArr = append(signerArr, signers[v.Address.String()]) + } + + commit, err := tmtypes.MakeCommit(blockID, blockHeight, 1, voteSet, signerArr, timestamp) + require.NoError(chain.TC.T, err) + + signedHeader := &tmproto.SignedHeader{ + Header: tmHeader.ToProto(), + Commit: commit.ToProto(), + } + + // only one sequencer can sign + pv, ok := signerArr[0].(mock.PV) + require.True(chain.TC.T, ok) + headerBytes, err := tmHeader.ToProto().Marshal() + require.NoError(chain.TC.T, err) + signedBytes, err := pv.PrivKey.Sign(headerBytes) + require.NoError(chain.TC.T, err) + + // Dymint check the header bytes signatures + signedHeader.Commit.Signatures[0].Signature = signedBytes + + valSet, err = tmValSet.ToProto() + require.NoError(chain.TC.T, err) + + if tmTrustedVals != nil { + trustedVals, err = tmTrustedVals.ToProto() + require.NoError(chain.TC.T, err) + } + + // The trusted fields may be nil. They may be filled before relaying messages to a client. + // The relayer is responsible for querying client and injecting appropriate trusted fields. + return &ibcdmtypes.Header{ + SignedHeader: signedHeader, + ValidatorSet: valSet, + TrustedHeight: trustedHeight, + TrustedValidators: trustedVals, + } +} + +// UpdateTimeForChain updates the clock for this chain. 
+func (chain *TestChainDymint) UpdateCurrentHeaderTime(t time.Time) { + chain.CurrentHeader.Time = t +} + +// BeginBlock signals the beginning of a block with chain.CurrentHeader +func (chain *TestChainDymint) BeginBlock() { + chain.TC.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader}) +} + +// ClientConfigToState builds the ClientState based on the clientConfig and last header +func (chain *TestChainDymint) ClientConfigToState(clientConfig ClientConfig) exported.ClientState { + tmConfig, ok := clientConfig.(*DymintConfig) + require.True(chain.TC.T, ok) + + height := chain.LastHeader.GetHeight().(clienttypes.Height) + clientState := ibcdmtypes.NewClientState( + chain.TC.ChainID, tmConfig.TrustingPeriod, tmConfig.MaxClockDrift, + height, commitmenttypes.GetSDKSpecs(), UpgradePath, + ) + return clientState +} + +// GetConsensusState returns the consensus state of the last header +func (chain *TestChainDymint) GetConsensusState() exported.ConsensusState { + return chain.LastHeader.ConsensusState() +} + +func (chain *TestChainDymint) GetLastHeader() interface{} { + return chain.LastHeader +} diff --git a/testing/coordinator.go b/testing/coordinator.go index f2cc49f9a91..63f4f2215d8 100644 --- a/testing/coordinator.go +++ b/testing/coordinator.go @@ -8,6 +8,8 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + + "github.com/cosmos/ibc-go/v5/modules/core/exported" ) var ( @@ -25,6 +27,19 @@ type Coordinator struct { Chains map[string]*TestChain } +// StringSliceRemoveDuplicates returns a copy of stringSlice with duplicate entries removed +func StringSliceRemoveDuplicates(stringSlice []string) []string { + keys := make(map[string]bool) + list := []string{} + for _, entry := range stringSlice { + if _, value := keys[entry]; !value { + keys[entry] = true + list = append(list, entry) + } + } + return list +} + // NewCoordinator initializes Coordinator with N TestChain's func NewCoordinator(t *testing.T, n int) *Coordinator { chains := make(map[string]*TestChain) @@ -42,6 +57,40 @@ func NewCoordinator(t *testing.T, n int) *Coordinator { return coord } +// NewCoordinatorWithConsensusType initializes Coordinator with len(consensusTypes) TestChain's +// where the self client type of chain i is consensusTypes[i] +func NewCoordinatorWithConsensusType(t *testing.T, consensusTypes []string) *Coordinator { + chains := make(map[string]*TestChain) + coord := &Coordinator{ + T: t, + CurrentTime: globalStartTime, + } + + for i, consensusType := range consensusTypes { + chainID := GetChainID(i + 1) + + switch consensusType { + case exported.Dymint: + chains[chainID] = NewDymintTestChain(t, coord, chainID) + case exported.Tendermint: + chains[chainID] = NewTestChain(t, coord, chainID) + default: + return nil + } + + // Change to NewDymintTestChain + // add the consensusTypes to AllowedClients list + // clientKeeper := chains[chainID].App.GetIBCKeeper().ClientKeeper + // params := clientKeeper.GetParams(chains[chainID].GetContext()) + // clientKeeper.SetParams(chains[chainID].GetContext(), clienttypes.NewParams( + // StringSliceRemoveDuplicates(append(params.AllowedClients, consensusTypes...))..., + // )) + } + coord.Chains = chains + + return coord +} + // IncrementTime iterates through all the TestChain's and increments their current header time // by 5 seconds.
// diff --git a/testing/simapp/app_dymint.go b/testing/simapp/app_dymint.go new file mode 100644 index 00000000000..75af1cf5c7b --- /dev/null +++ b/testing/simapp/app_dymint.go @@ -0,0 +1,485 @@ +package simapp + +import ( + "io" + + "github.com/cosmos/cosmos-sdk/baseapp" + _ "github.com/cosmos/cosmos-sdk/client/docs/statik" // this is used for serving docs + servertypes "github.com/cosmos/cosmos-sdk/server/types" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/auth" + "github.com/cosmos/cosmos-sdk/x/auth/ante" + authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/cosmos/cosmos-sdk/x/auth/vesting" + vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types" + authz "github.com/cosmos/cosmos-sdk/x/authz" + authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper" + authzmodule "github.com/cosmos/cosmos-sdk/x/authz/module" + "github.com/cosmos/cosmos-sdk/x/bank" + bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + "github.com/cosmos/cosmos-sdk/x/capability" + capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + "github.com/cosmos/cosmos-sdk/x/crisis" + crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper" + crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types" + distr "github.com/cosmos/cosmos-sdk/x/distribution" + distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper" + distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + "github.com/cosmos/cosmos-sdk/x/evidence" + evidencekeeper "github.com/cosmos/cosmos-sdk/x/evidence/keeper" + evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types" + "github.com/cosmos/cosmos-sdk/x/feegrant" + feegrantkeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper" + feegrantmodule "github.com/cosmos/cosmos-sdk/x/feegrant/module" + "github.com/cosmos/cosmos-sdk/x/genutil" + genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" + "github.com/cosmos/cosmos-sdk/x/gov" + govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + govv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + "github.com/cosmos/cosmos-sdk/x/mint" + mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + "github.com/cosmos/cosmos-sdk/x/params" + paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" + paramproposal "github.com/cosmos/cosmos-sdk/x/params/types/proposal" + "github.com/cosmos/cosmos-sdk/x/slashing" + slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" + slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + "github.com/cosmos/cosmos-sdk/x/staking" + stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/cosmos/cosmos-sdk/x/upgrade" + upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + "github.com/spf13/cast" + "github.com/tendermint/tendermint/libs/log" + tmos "github.com/tendermint/tendermint/libs/os" + dbm "github.com/tendermint/tm-db" + + ica 
"github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts" + icacontroller "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/controller" + icacontrollerkeeper "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/controller/keeper" + icacontrollertypes "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/controller/types" + icahost "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/host" + icahostkeeper "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/host/keeper" + icahosttypes "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/host/types" + icatypes "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/types" + ibcfee "github.com/cosmos/ibc-go/v5/modules/apps/29-fee" + ibcfeekeeper "github.com/cosmos/ibc-go/v5/modules/apps/29-fee/keeper" + ibcfeetypes "github.com/cosmos/ibc-go/v5/modules/apps/29-fee/types" + transfer "github.com/cosmos/ibc-go/v5/modules/apps/transfer" + ibctransferkeeper "github.com/cosmos/ibc-go/v5/modules/apps/transfer/keeper" + ibctransfertypes "github.com/cosmos/ibc-go/v5/modules/apps/transfer/types" + ibc "github.com/cosmos/ibc-go/v5/modules/core" + ibcclient "github.com/cosmos/ibc-go/v5/modules/core/02-client" + ibcclienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + porttypes "github.com/cosmos/ibc-go/v5/modules/core/05-port/types" + ibchost "github.com/cosmos/ibc-go/v5/modules/core/24-host" + ibckeeper "github.com/cosmos/ibc-go/v5/modules/core/keeper" + ibcmock "github.com/cosmos/ibc-go/v5/testing/mock" + simappparams "github.com/cosmos/ibc-go/v5/testing/simapp/params" +) + +// NewSimApp returns a reference to an initialized SimApp. +func NewSimAppWithDymint( + logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, skipUpgradeHeights map[int64]bool, + homePath string, invCheckPeriod uint, encodingConfig simappparams.EncodingConfig, + appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp), +) *SimApp { + appCodec := encodingConfig.Marshaler + legacyAmino := encodingConfig.Amino + interfaceRegistry := encodingConfig.InterfaceRegistry + + bApp := baseapp.NewBaseApp(appName, logger, db, encodingConfig.TxConfig.TxDecoder(), baseAppOptions...) 
+ bApp.SetCommitMultiStoreTracer(traceStore) + bApp.SetVersion(version.Version) + bApp.SetInterfaceRegistry(interfaceRegistry) + + keys := sdk.NewKVStoreKeys( + authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, + minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, + govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey, + evidencetypes.StoreKey, ibctransfertypes.StoreKey, icacontrollertypes.StoreKey, icahosttypes.StoreKey, capabilitytypes.StoreKey, + authzkeeper.StoreKey, ibcfeetypes.StoreKey, + ) + tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey) + memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey) + + app := &SimApp{ + BaseApp: bApp, + legacyAmino: legacyAmino, + appCodec: appCodec, + interfaceRegistry: interfaceRegistry, + invCheckPeriod: invCheckPeriod, + keys: keys, + tkeys: tkeys, + memKeys: memKeys, + } + + app.ParamsKeeper = initParamsKeeper(appCodec, legacyAmino, keys[paramstypes.StoreKey], tkeys[paramstypes.TStoreKey]) + + // set the BaseApp's parameter store + bApp.SetParamStore(app.ParamsKeeper.Subspace(baseapp.Paramspace).WithKeyTable(paramstypes.ConsensusParamsKeyTable())) + + // add capability keeper and ScopeToModule for ibc module + app.CapabilityKeeper = capabilitykeeper.NewKeeper(appCodec, keys[capabilitytypes.StoreKey], memKeys[capabilitytypes.MemStoreKey]) + scopedIBCKeeper := app.CapabilityKeeper.ScopeToModule(ibchost.ModuleName) + scopedTransferKeeper := app.CapabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName) + scopedICAControllerKeeper := app.CapabilityKeeper.ScopeToModule(icacontrollertypes.SubModuleName) + scopedICAHostKeeper := app.CapabilityKeeper.ScopeToModule(icahosttypes.SubModuleName) + + // NOTE: the IBC mock keeper and application module is used only for testing core IBC. Do + // not replicate if you do not need to test core IBC or light clients. 
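+ // NOTE: the scoped keepers created below back the mock, fee mock and ICA mock test modules registered on the IBC router further down; production apps do not need them.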
+ scopedIBCMockKeeper := app.CapabilityKeeper.ScopeToModule(ibcmock.ModuleName) + scopedFeeMockKeeper := app.CapabilityKeeper.ScopeToModule(MockFeePort) + scopedICAMockKeeper := app.CapabilityKeeper.ScopeToModule(ibcmock.ModuleName + icacontrollertypes.SubModuleName) + + // seal capability keeper after scoping modules + app.CapabilityKeeper.Seal() + + // SDK module keepers + + app.AccountKeeper = authkeeper.NewAccountKeeper( + appCodec, keys[authtypes.StoreKey], app.GetSubspace(authtypes.ModuleName), authtypes.ProtoBaseAccount, maccPerms, sdk.GetConfig().GetBech32AccountAddrPrefix(), + ) + app.BankKeeper = bankkeeper.NewBaseKeeper( + appCodec, keys[banktypes.StoreKey], app.AccountKeeper, app.GetSubspace(banktypes.ModuleName), app.ModuleAccountAddrs(), + ) + stakingKeeper := stakingkeeper.NewKeeper( + appCodec, keys[stakingtypes.StoreKey], app.AccountKeeper, app.BankKeeper, app.GetSubspace(stakingtypes.ModuleName), + ) + app.MintKeeper = mintkeeper.NewKeeper( + appCodec, keys[minttypes.StoreKey], app.GetSubspace(minttypes.ModuleName), &stakingKeeper, + app.AccountKeeper, app.BankKeeper, authtypes.FeeCollectorName, + ) + app.DistrKeeper = distrkeeper.NewKeeper( + appCodec, keys[distrtypes.StoreKey], app.GetSubspace(distrtypes.ModuleName), app.AccountKeeper, app.BankKeeper, + &stakingKeeper, authtypes.FeeCollectorName, + ) + + app.SlashingKeeper = slashingkeeper.NewKeeper( + appCodec, keys[slashingtypes.StoreKey], &stakingKeeper, app.GetSubspace(slashingtypes.ModuleName), + ) + app.CrisisKeeper = crisiskeeper.NewKeeper( + app.GetSubspace(crisistypes.ModuleName), invCheckPeriod, app.BankKeeper, authtypes.FeeCollectorName, + ) + + app.FeeGrantKeeper = feegrantkeeper.NewKeeper(appCodec, keys[feegrant.StoreKey], app.AccountKeeper) + app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, keys[upgradetypes.StoreKey], appCodec, homePath, app.BaseApp, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + + // register the staking hooks + // NOTE: stakingKeeper above is passed by reference, so that it will contain these hooks + app.StakingKeeper = *stakingKeeper.SetHooks( + stakingtypes.NewMultiStakingHooks(app.DistrKeeper.Hooks(), app.SlashingKeeper.Hooks()), + ) + + app.AuthzKeeper = authzkeeper.NewKeeper(keys[authzkeeper.StoreKey], appCodec, app.MsgServiceRouter(), app.AccountKeeper) + + // IBC Keepers + + app.IBCKeeper = ibckeeper.NewKeeperWithDymint( + appCodec, keys[ibchost.StoreKey], app.GetSubspace(ibchost.ModuleName), app.StakingKeeper, app.UpgradeKeeper, scopedIBCKeeper, + ) + + // register the proposal types + govRouter := govv1beta1.NewRouter() + govRouter.AddRoute(govtypes.RouterKey, govv1beta1.ProposalHandler). + AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.ParamsKeeper)). + AddRoute(distrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.DistrKeeper)). + AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.UpgradeKeeper)). 
+ AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(app.IBCKeeper.ClientKeeper)) + + govConfig := govtypes.DefaultConfig() + /* + Example of setting gov params: + govConfig.MaxMetadataLen = 10000 + */ + govKeeper := govkeeper.NewKeeper( + appCodec, keys[govtypes.StoreKey], app.GetSubspace(govtypes.ModuleName), app.AccountKeeper, app.BankKeeper, + &stakingKeeper, govRouter, app.MsgServiceRouter(), govConfig, + ) + + app.GovKeeper = *govKeeper.SetHooks( + govtypes.NewMultiGovHooks( + // register the governance hooks + ), + ) + + // IBC Fee Module keeper + app.IBCFeeKeeper = ibcfeekeeper.NewKeeper( + appCodec, keys[ibcfeetypes.StoreKey], app.GetSubspace(ibcfeetypes.ModuleName), + app.IBCKeeper.ChannelKeeper, // may be replaced with IBC middleware + app.IBCKeeper.ChannelKeeper, + &app.IBCKeeper.PortKeeper, app.AccountKeeper, app.BankKeeper, + ) + + // ICA Controller keeper + app.ICAControllerKeeper = icacontrollerkeeper.NewKeeper( + appCodec, keys[icacontrollertypes.StoreKey], app.GetSubspace(icacontrollertypes.SubModuleName), + app.IBCFeeKeeper, // use ics29 fee as ics4Wrapper in middleware stack + app.IBCKeeper.ChannelKeeper, &app.IBCKeeper.PortKeeper, + scopedICAControllerKeeper, app.MsgServiceRouter(), + ) + + // ICA Host keeper + app.ICAHostKeeper = icahostkeeper.NewKeeper( + appCodec, keys[icahosttypes.StoreKey], app.GetSubspace(icahosttypes.SubModuleName), + app.IBCFeeKeeper, // use ics29 fee as ics4Wrapper in middleware stack + app.IBCKeeper.ChannelKeeper, &app.IBCKeeper.PortKeeper, + app.AccountKeeper, scopedICAHostKeeper, app.MsgServiceRouter(), + ) + + // Create IBC Router + ibcRouter := porttypes.NewRouter() + + // Middleware Stacks + + // Create Transfer Keeper and pass IBCFeeKeeper as expected Channel and PortKeeper + // since fee middleware will wrap the IBCKeeper for underlying application. + app.TransferKeeper = ibctransferkeeper.NewKeeper( + appCodec, keys[ibctransfertypes.StoreKey], app.GetSubspace(ibctransfertypes.ModuleName), + app.IBCFeeKeeper, // ISC4 Wrapper: fee IBC middleware + app.IBCKeeper.ChannelKeeper, &app.IBCKeeper.PortKeeper, + app.AccountKeeper, app.BankKeeper, scopedTransferKeeper, + ) + + // Mock Module Stack + + // Mock Module setup for testing IBC and also acts as the interchain accounts authentication module + // NOTE: the IBC mock keeper and application module is used only for testing core IBC. Do + // not replicate if you do not need to test core IBC or light clients. 
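+ // The mock AppModule created below doubles as the ICA controller authentication module in the ICA stack further down.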
+ mockModule := ibcmock.NewAppModule(&app.IBCKeeper.PortKeeper) + + // The mock module is used for testing IBC + mockIBCModule := ibcmock.NewIBCModule(&mockModule, ibcmock.NewIBCApp(ibcmock.ModuleName, scopedIBCMockKeeper)) + ibcRouter.AddRoute(ibcmock.ModuleName, mockIBCModule) + + // Create Transfer Stack + // SendPacket, since it is originating from the application to core IBC: + // transferKeeper.SendPacket -> fee.SendPacket -> channel.SendPacket + + // RecvPacket, message that originates from core IBC and goes down to app, the flow is the other way + // channel.RecvPacket -> fee.OnRecvPacket -> transfer.OnRecvPacket + + // transfer stack contains (from top to bottom): + // - IBC Fee Middleware + // - Transfer + + // create IBC module from bottom to top of stack + var transferStack porttypes.IBCModule + transferStack = transfer.NewIBCModule(app.TransferKeeper) + transferStack = ibcfee.NewIBCMiddleware(transferStack, app.IBCFeeKeeper) + + // Add transfer stack to IBC Router + ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack) + + // Create Interchain Accounts Stack + // SendPacket, since it is originating from the application to core IBC: + // icaAuthModuleKeeper.SendTx -> icaController.SendPacket -> fee.SendPacket -> channel.SendPacket + + // initialize ICA module with mock module as the authentication module on the controller side + var icaControllerStack porttypes.IBCModule + icaControllerStack = ibcmock.NewIBCModule(&mockModule, ibcmock.NewIBCApp("", scopedICAMockKeeper)) + app.ICAAuthModule = icaControllerStack.(ibcmock.IBCModule) + icaControllerStack = icacontroller.NewIBCMiddleware(icaControllerStack, app.ICAControllerKeeper) + icaControllerStack = ibcfee.NewIBCMiddleware(icaControllerStack, app.IBCFeeKeeper) + + // RecvPacket, message that originates from core IBC and goes down to app, the flow is: + // channel.RecvPacket -> fee.OnRecvPacket -> icaHost.OnRecvPacket + + var icaHostStack porttypes.IBCModule + icaHostStack = icahost.NewIBCModule(app.ICAHostKeeper) + icaHostStack = ibcfee.NewIBCMiddleware(icaHostStack, app.IBCFeeKeeper) + + // Add host, controller & ica auth modules to IBC router + ibcRouter. + // the ICA Controller middleware needs to be explicitly added to the IBC Router because the + // ICA controller module owns the port capability for ICA. The ICA authentication module + // owns the channel capability. + AddRoute(icacontrollertypes.SubModuleName, icaControllerStack). + AddRoute(icahosttypes.SubModuleName, icaHostStack). 
+ AddRoute(ibcmock.ModuleName+icacontrollertypes.SubModuleName, icaControllerStack) // ica with mock auth module stack route to ica (top level of middleware stack) + + // Create Mock IBC Fee module stack for testing + // SendPacket, since it is originating from the application to core IBC: + // mockModule.SendPacket -> fee.SendPacket -> channel.SendPacket + + // OnRecvPacket, message that originates from core IBC and goes down to app, the flow is the other way + // channel.RecvPacket -> fee.OnRecvPacket -> mockModule.OnRecvPacket + + // OnAcknowledgementPacket as this is where fees are paid out + // mockModule.OnAcknowledgementPacket -> fee.OnAcknowledgementPacket -> channel.OnAcknowledgementPacket + + // create fee wrapped mock module + feeMockModule := ibcmock.NewIBCModule(&mockModule, ibcmock.NewIBCApp(MockFeePort, scopedFeeMockKeeper)) + app.FeeMockModule = feeMockModule + feeWithMockModule := ibcfee.NewIBCMiddleware(feeMockModule, app.IBCFeeKeeper) + ibcRouter.AddRoute(MockFeePort, feeWithMockModule) + + // Seal the IBC Router + app.IBCKeeper.SetRouter(ibcRouter) + + // create evidence keeper with router + evidenceKeeper := evidencekeeper.NewKeeper( + appCodec, keys[evidencetypes.StoreKey], &app.StakingKeeper, app.SlashingKeeper, + ) + // If evidence needs to be handled for the app, set routes in router here and seal + app.EvidenceKeeper = *evidenceKeeper + + /**** Module Options ****/ + + // NOTE: we may consider parsing `appOpts` inside module constructors. For the moment + // we prefer to be more strict in what arguments the modules expect. + skipGenesisInvariants := cast.ToBool(appOpts.Get(crisis.FlagSkipGenesisInvariants)) + + // NOTE: Any module instantiated in the module manager that is later modified + // must be passed by reference here. + app.mm = module.NewManager( + // SDK app modules + genutil.NewAppModule( + app.AccountKeeper, app.StakingKeeper, app.BaseApp.DeliverTx, + encodingConfig.TxConfig, + ), + auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts), + vesting.NewAppModule(app.AccountKeeper, app.BankKeeper), + bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper), + capability.NewAppModule(appCodec, *app.CapabilityKeeper), + crisis.NewAppModule(&app.CrisisKeeper, skipGenesisInvariants), + feegrantmodule.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry), + gov.NewAppModule(appCodec, app.GovKeeper, app.AccountKeeper, app.BankKeeper), + mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil), + slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper), + distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper), + staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper), + upgrade.NewAppModule(app.UpgradeKeeper), + evidence.NewAppModule(app.EvidenceKeeper), + ibc.NewAppModule(app.IBCKeeper), + params.NewAppModule(app.ParamsKeeper), + authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), + + // IBC modules + transfer.NewAppModule(app.TransferKeeper), + ibcfee.NewAppModule(app.IBCFeeKeeper), + ica.NewAppModule(&app.ICAControllerKeeper, &app.ICAHostKeeper), + mockModule, + ) + + // During begin block slashing happens after distr.BeginBlocker so that + // there is nothing left over in the validator fee pool, so as to keep the + // CanWithdrawInvariant invariant.
+ // NOTE: staking module is required if HistoricalEntries param > 0 + // NOTE: capability module's beginblocker must come before any modules using capabilities (e.g. IBC) + app.mm.SetOrderBeginBlockers( + upgradetypes.ModuleName, capabilitytypes.ModuleName, minttypes.ModuleName, distrtypes.ModuleName, slashingtypes.ModuleName, + evidencetypes.ModuleName, stakingtypes.ModuleName, ibchost.ModuleName, ibctransfertypes.ModuleName, authtypes.ModuleName, + banktypes.ModuleName, govtypes.ModuleName, crisistypes.ModuleName, genutiltypes.ModuleName, authz.ModuleName, feegrant.ModuleName, + paramstypes.ModuleName, vestingtypes.ModuleName, icatypes.ModuleName, ibcfeetypes.ModuleName, ibcmock.ModuleName, + ) + app.mm.SetOrderEndBlockers( + crisistypes.ModuleName, govtypes.ModuleName, stakingtypes.ModuleName, ibchost.ModuleName, ibctransfertypes.ModuleName, + capabilitytypes.ModuleName, authtypes.ModuleName, banktypes.ModuleName, distrtypes.ModuleName, slashingtypes.ModuleName, + minttypes.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authz.ModuleName, feegrant.ModuleName, paramstypes.ModuleName, + upgradetypes.ModuleName, vestingtypes.ModuleName, icatypes.ModuleName, ibcfeetypes.ModuleName, ibcmock.ModuleName, + ) + + // NOTE: The genutils module must occur after staking so that pools are + // properly initialized with tokens from genesis accounts. + // NOTE: Capability module must occur first so that it can initialize any capabilities + // so that other modules that want to create or claim capabilities afterwards in InitChain + // can do so safely. + app.mm.SetOrderInitGenesis( + capabilitytypes.ModuleName, authtypes.ModuleName, banktypes.ModuleName, distrtypes.ModuleName, stakingtypes.ModuleName, + slashingtypes.ModuleName, govtypes.ModuleName, minttypes.ModuleName, crisistypes.ModuleName, + ibchost.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authz.ModuleName, ibctransfertypes.ModuleName, + icatypes.ModuleName, ibcfeetypes.ModuleName, ibcmock.ModuleName, feegrant.ModuleName, paramstypes.ModuleName, upgradetypes.ModuleName, vestingtypes.ModuleName, + ) + + app.mm.RegisterInvariants(&app.CrisisKeeper) + app.mm.RegisterRoutes(app.Router(), app.QueryRouter(), encodingConfig.Amino) + app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter()) + app.mm.RegisterServices(app.configurator) + + // add test gRPC service for testing gRPC queries in isolation + testdata.RegisterQueryServer(app.GRPCQueryRouter(), testdata.QueryImpl{}) + + // create the simulation manager and define the order of the modules for deterministic simulations + // + // NOTE: this is not required for apps that don't use the simulator for fuzz testing + // transactions + app.sm = module.NewSimulationManager( + auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts), + bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper), + capability.NewAppModule(appCodec, *app.CapabilityKeeper), + feegrantmodule.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry), + gov.NewAppModule(appCodec, app.GovKeeper, app.AccountKeeper, app.BankKeeper), + mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil), + staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper), + distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper), + slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper,
app.StakingKeeper), + params.NewAppModule(app.ParamsKeeper), + evidence.NewAppModule(app.EvidenceKeeper), + authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), + ibc.NewAppModule(app.IBCKeeper), + transfer.NewAppModule(app.TransferKeeper), + ica.NewAppModule(&app.ICAControllerKeeper, &app.ICAHostKeeper), + ) + + app.sm.RegisterStoreDecoders() + + // initialize stores + app.MountKVStores(keys) + app.MountTransientStores(tkeys) + app.MountMemoryStores(memKeys) + + // initialize BaseApp + app.SetInitChainer(app.InitChainer) + app.SetBeginBlocker(app.BeginBlocker) + anteHandler, err := NewAnteHandler( + HandlerOptions{ + HandlerOptions: ante.HandlerOptions{ + AccountKeeper: app.AccountKeeper, + BankKeeper: app.BankKeeper, + SignModeHandler: encodingConfig.TxConfig.SignModeHandler(), + FeegrantKeeper: app.FeeGrantKeeper, + SigGasConsumer: ante.DefaultSigVerificationGasConsumer, + }, + IBCKeeper: app.IBCKeeper, + }, + ) + if err != nil { + panic(err) + } + + app.SetAnteHandler(anteHandler) + + app.SetEndBlocker(app.EndBlocker) + + app.setupUpgradeHandlers() + + if loadLatest { + if err := app.LoadLatestVersion(); err != nil { + tmos.Exit(err.Error()) + } + } + + app.ScopedIBCKeeper = scopedIBCKeeper + app.ScopedTransferKeeper = scopedTransferKeeper + app.ScopedICAControllerKeeper = scopedICAControllerKeeper + app.ScopedICAHostKeeper = scopedICAHostKeeper + + // NOTE: the IBC mock keeper and application module is used only for testing core IBC. Do + // not replicate if you do not need to test core IBC or light clients. + app.ScopedIBCMockKeeper = scopedIBCMockKeeper + app.ScopedICAMockKeeper = scopedICAMockKeeper + app.ScopedFeeMockKeeper = scopedFeeMockKeeper + + return app +} diff --git a/testing/simapp/test_helpers.go b/testing/simapp/test_helpers.go index ab04f77afa1..6f0bb2e370b 100644 --- a/testing/simapp/test_helpers.go +++ b/testing/simapp/test_helpers.go @@ -205,6 +205,7 @@ func SetupWithGenesisAccounts(genAccs []authtypes.GenesisAccount, balances ...ba }, ) + + // app.EndBlock(abci.RequestEndBlock{Height: app.LastBlockHeight()}) app.Commit() app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: app.LastBlockHeight() + 1}})