Skip to content

Commit

Permalink
feat: persistent storage
Browse files Browse the repository at this point in the history
Signed-off-by: Artur Troian <[email protected]>
  • Loading branch information
troian committed Aug 27, 2021
1 parent a7163d0 commit 01837ac
Show file tree
Hide file tree
Showing 107 changed files with 7,207 additions and 4,402 deletions.
31 changes: 31 additions & 0 deletions _docs/kustomize/storage/storageclass.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# Default storage class backing Akash persistent storage ("beta2").
# Marked as the cluster default so PVCs without an explicit class land here.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
  name: beta2
  labels:
    akash.network: "true"
provisioner: rancher.io/local-path
reclaimPolicy: Delete
# WaitForFirstConsumer delays binding until a pod is scheduled, so the
# local-path volume is provisioned on the node that will actually use it.
volumeBindingMode: WaitForFirstConsumer
---
# Secondary, non-default class for workloads that opt in explicitly.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    storageclass.kubernetes.io/is-default-class: "false"
  name: standard
provisioner: rancher.io/local-path
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
---
# Akash CRD tracking inventory state for the beta2 class.
# capacity: -1 presumably means "unlimited / not enforced" — TODO confirm
# against the StorageClassState CRD definition.
apiVersion: akash.network/v1
kind: StorageClassState
metadata:
  name: beta2
  labels:
    akash.network/component: "storage"
    akash.network: "true"
spec:
  capacity: -1
10 changes: 5 additions & 5 deletions _run/common-kind.mk
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,13 @@
# KinD, it's fine to use other names locally, however in GH container name
# is configured by engineerd/setup-kind. `kind-control-plane` is the docker
# image's name in GH Actions.
KIND_NAME ?= $(shell basename $$PWD)
export KIND_NAME ?= $(shell basename $$PWD)

KINDEST_VERSION ?= v1.21.1
KIND_IMG ?= kindest/node:$(KINDEST_VERSION)
KINDEST_VERSION ?= v1.21.1
KIND_IMG ?= kindest/node:$(KINDEST_VERSION)

K8S_CONTEXT ?= $(shell kubectl config current-context)
KIND_HTTP_PORT ?= $(shell docker inspect \
K8S_CONTEXT ?= $(shell kubectl config current-context)
KIND_HTTP_PORT ?= $(shell docker inspect \
--type container "$(KIND_NAME)-control-plane" \
--format '{{index .NetworkSettings.Ports "80/tcp" 0 "HostPort"}}')

Expand Down
5 changes: 5 additions & 0 deletions _run/kube/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
FROM alpine

# COPY is preferred over ADD for plain local files: it has no archive
# extraction or remote-URL semantics, so intent is unambiguous.
COPY ./test.sh /bin/

# Exec form avoids wrapping the command in `/bin/sh -c`, so the script
# runs as PID 1 and receives container stop signals directly.
ENTRYPOINT ["/bin/test.sh"]
42 changes: 42 additions & 0 deletions _run/kube/grafana.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
---
# Akash SDL (v2) deploying Grafana with a persistent data volume,
# used to exercise the persistent-storage feature end to end.
version: "2.0"

services:
  grafana:
    image: grafana/grafana
    expose:
      - port: 3000
        as: 80
        to:
          - global: true
    params:
      storage:
        # Mount the "data" volume (declared in the compute profile below)
        # at Grafana's state directory so dashboards/settings persist.
        data:
          mount: /var/lib/grafana

profiles:
  compute:
    grafana:
      resources:
        cpu:
          units: 1
        memory:
          size: 1Gi
        storage:
          # Unnamed ephemeral scratch space.
          - size: 512Mi
          # Named persistent volume referenced by services.grafana.params.
          - name: data
            size: 1Gi
            attributes:
              persistent: true
  placement:
    westcoast:
      attributes:
        region: us-west
      pricing:
        grafana:
          denom: uakt
          amount: 1000

deployment:
  grafana:
    westcoast:
      profile: grafana
      count: 1
14 changes: 14 additions & 0 deletions _run/kube/test.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
#!/usr/bin/env ash

# Persistence smoke test: a marker file under the persistent mount must
# survive container restarts. First run creates the file; any later run
# finding it proves the volume's data persisted.

FILE=/var/lib/testdata/test

# Use POSIX `[ ]` instead of bash-only `[[ ]]`: BusyBox ash does not
# reliably implement `[[`, so the original test could fail to parse.
if [ -f "$FILE" ]; then
    echo "test file exists. data survived"
    echo "content of the file"
    cat "$FILE"
else
    echo "initializing persistence test file"
    echo "Akash Persistence welcomes you" > "$FILE"
fi

# exec hands PID 1 to the real entrypoint so it receives signals directly.
exec /docker-entrypoint.sh
14 changes: 7 additions & 7 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ require (
github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c // indirect
github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac // indirect
github.com/libp2p/go-buffer-pool v0.0.3-0.20190619091711-d94255cb3dfc // indirect
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635
github.com/pkg/errors v0.9.1
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.11.0
Expand All @@ -41,12 +41,12 @@ require (
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c
google.golang.org/grpc v1.38.0
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
k8s.io/api v0.19.3
k8s.io/apimachinery v0.20.2
k8s.io/client-go v0.19.3
k8s.io/code-generator v0.19.3
k8s.io/kubectl v0.19.3
k8s.io/metrics v0.19.3
k8s.io/api v0.21.3
k8s.io/apimachinery v0.21.3
k8s.io/client-go v0.21.3
k8s.io/code-generator v0.21.3
k8s.io/kubectl v0.21.3
k8s.io/metrics v0.21.3
sigs.k8s.io/kind v0.11.1
)

Expand Down
207 changes: 207 additions & 0 deletions integration/app_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,207 @@
package integration

import (
"encoding/json"
"fmt"
"path/filepath"

"github.com/cosmos/cosmos-sdk/client/flags"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

providerCmd "github.com/ovrclk/akash/provider/cmd"
ptestutil "github.com/ovrclk/akash/provider/testutil"
"github.com/ovrclk/akash/sdl"
deploycli "github.com/ovrclk/akash/x/deployment/client/cli"
dtypes "github.com/ovrclk/akash/x/deployment/types"
mcli "github.com/ovrclk/akash/x/market/client/cli"
mtypes "github.com/ovrclk/akash/x/market/types"
)

// E2EApp embeds the shared IntegrationTestSuite to run the end-to-end
// application deployment scenario against a local network and provider.
type E2EApp struct {
	IntegrationTestSuite
}

// TestE2EApp exercises the full deployment lifecycle end to end:
// create an on-chain deployment from an SDL file, verify the resulting
// order, create a lease from the provider's bid, send the manifest to
// the provider, probe the deployed app over HTTP, and finally check the
// provider's status, lease-status and per-service status endpoints.
func (s *E2EApp) TestE2EApp() {
	// Resolve the SDL used both for the on-chain deployment tx and,
	// later, for the manifest sent to the provider.
	deploymentPath, err := filepath.Abs("../x/deployment/testdata/deployment-v2.yaml")
	s.Require().NoError(err)

	cctxJSON := s.validator.ClientCtx.WithOutputFormat("json")

	// Fixed DSeq so the deployment is addressable deterministically below.
	deploymentID := dtypes.DeploymentID{
		Owner: s.keyTenant.GetAddress().String(),
		DSeq:  uint64(103),
	}

	// Create the deployment and assert it via queries.
	tenantAddr := s.keyTenant.GetAddress().String()
	res, err := deploycli.TxCreateDeploymentExec(
		s.validator.ClientCtx,
		s.keyTenant.GetAddress(),
		deploymentPath,
		fmt.Sprintf("--%s", flags.FlagSkipConfirmation),
		fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
		fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(20))).String()),
		fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
		fmt.Sprintf("--dseq=%v", deploymentID.DSeq),
	)
	s.Require().NoError(err)
	s.Require().NoError(s.network.WaitForNextBlock())
	validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())

	// Test query deployments ---------------------------------------------
	res, err = deploycli.QueryDeploymentsExec(cctxJSON)
	s.Require().NoError(err)

	deployResp := &dtypes.QueryDeploymentsResponse{}
	err = s.validator.ClientCtx.Codec.UnmarshalJSON(res.Bytes(), deployResp)
	s.Require().NoError(err)
	s.Require().Len(deployResp.Deployments, 1, "Deployment Create Failed")
	deployments := deployResp.Deployments
	s.Require().Equal(tenantAddr, deployments[0].Deployment.DeploymentID.Owner)

	// Test query of a single deployment by its ID.
	createdDep := deployments[0]
	res, err = deploycli.QueryDeploymentExec(cctxJSON, createdDep.Deployment.DeploymentID)
	s.Require().NoError(err)

	deploymentResp := dtypes.QueryDeploymentResponse{}
	err = s.validator.ClientCtx.Codec.UnmarshalJSON(res.Bytes(), &deploymentResp)
	s.Require().NoError(err)
	s.Require().Equal(createdDep, deploymentResp)
	s.Require().NotEmpty(deploymentResp.Deployment.Version)

	// Test query deployments with owner/dseq filters ----------------------
	res, err = deploycli.QueryDeploymentsExec(
		s.validator.ClientCtx.WithOutputFormat("json"),
		fmt.Sprintf("--owner=%s", tenantAddr),
		fmt.Sprintf("--dseq=%v", createdDep.Deployment.DeploymentID.DSeq),
	)
	s.Require().NoError(err, "Error when fetching deployments with owner filter")

	deployResp = &dtypes.QueryDeploymentsResponse{}
	err = s.validator.ClientCtx.Codec.UnmarshalJSON(res.Bytes(), deployResp)
	s.Require().NoError(err)
	s.Require().Len(deployResp.Deployments, 1)

	// Assert an order was created for the deployment; test query orders.
	res, err = mcli.QueryOrdersExec(cctxJSON)
	s.Require().NoError(err)

	result := &mtypes.QueryOrdersResponse{}
	err = s.validator.ClientCtx.Codec.UnmarshalJSON(res.Bytes(), result)
	s.Require().NoError(err)
	s.Require().Len(result.Orders, 1)
	orders := result.Orders
	s.Require().Equal(tenantAddr, orders[0].OrderID.Owner)

	// Wait for EndBlock to handle bidding and creating the lease.
	s.Require().NoError(s.waitForBlocksCommitted(15))

	// Assert provider made a bid; test query bids.
	res, err = mcli.QueryBidsExec(cctxJSON)
	s.Require().NoError(err)
	bidsRes := &mtypes.QueryBidsResponse{}
	err = s.validator.ClientCtx.Codec.UnmarshalJSON(res.Bytes(), bidsRes)
	s.Require().NoError(err)
	s.Require().Len(bidsRes.Bids, 1)

	// Tenant accepts the bid, creating a lease.
	res, err = mcli.TxCreateLeaseExec(
		cctxJSON,
		bidsRes.Bids[0].Bid.BidID,
		s.keyTenant.GetAddress(),
		fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
		fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
		fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
		fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
	)
	s.Require().NoError(err)
	s.Require().NoError(s.waitForBlocksCommitted(6))
	validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())

	// Test query leases and confirm the lease belongs to our provider.
	res, err = mcli.QueryLeasesExec(cctxJSON)
	s.Require().NoError(err)

	leaseRes := &mtypes.QueryLeasesResponse{}
	err = s.validator.ClientCtx.Codec.UnmarshalJSON(res.Bytes(), leaseRes)
	s.Require().NoError(err)
	s.Require().Len(leaseRes.Leases, 1)

	lease := newestLease(leaseRes.Leases)
	lid := lease.LeaseID
	s.Require().Equal(s.keyProvider.GetAddress().String(), lid.Provider)

	// Send Manifest to Provider ----------------------------------------------
	_, err = ptestutil.TestSendManifest(
		cctxJSON,
		lid.BidID(),
		deploymentPath,
		fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()),
		fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir),
	)
	s.Require().NoError(err)
	s.Require().NoError(s.waitForBlocksCommitted(20))

	// Probe the deployed app over HTTP (retries up to 50 attempts).
	appURL := fmt.Sprintf("http://%s:%s/", s.appHost, s.appPort)
	queryApp(s.T(), appURL, 50)

	// Provider status must report exactly one active lease.
	cmdResult, err := providerCmd.ProviderStatusExec(s.validator.ClientCtx, lid.Provider)
	assert.NoError(s.T(), err)
	data := make(map[string]interface{})
	err = json.Unmarshal(cmdResult.Bytes(), &data)
	assert.NoError(s.T(), err)
	leaseCount, ok := data["cluster"].(map[string]interface{})["leases"]
	assert.True(s.T(), ok)
	// JSON numbers unmarshal into float64, hence the float comparison.
	assert.Equal(s.T(), float64(1), leaseCount)

	// Read the SDL into memory so each declared service can be checked.
	deploymentSdl, err := sdl.ReadFile(deploymentPath)
	require.NoError(s.T(), err)
	mani, err := deploymentSdl.Manifest()
	require.NoError(s.T(), err)

	// Lease status must report a non-zero total for every manifest service.
	cmdResult, err = providerCmd.ProviderLeaseStatusExec(
		s.validator.ClientCtx,
		fmt.Sprintf("--%s=%v", "dseq", lid.DSeq),
		fmt.Sprintf("--%s=%v", "gseq", lid.GSeq),
		fmt.Sprintf("--%s=%v", "oseq", lid.OSeq),
		fmt.Sprintf("--%s=%v", "provider", lid.Provider),
		fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()),
		fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir),
	)
	assert.NoError(s.T(), err)
	err = json.Unmarshal(cmdResult.Bytes(), &data)
	assert.NoError(s.T(), err)
	for _, group := range mani.GetGroups() {
		for _, service := range group.Services {
			serviceTotalCount, ok := data["services"].(map[string]interface{})[service.Name].(map[string]interface{})["total"]
			assert.True(s.T(), ok)
			assert.Greater(s.T(), serviceTotalCount, float64(0))
		}
	}

	// Per-service status must likewise report a non-zero total per service.
	for _, group := range mani.GetGroups() {
		for _, service := range group.Services {
			cmdResult, err = providerCmd.ProviderServiceStatusExec(
				s.validator.ClientCtx,
				fmt.Sprintf("--%s=%v", "dseq", lid.DSeq),
				fmt.Sprintf("--%s=%v", "gseq", lid.GSeq),
				fmt.Sprintf("--%s=%v", "oseq", lid.OSeq),
				fmt.Sprintf("--%s=%v", "provider", lid.Provider),
				fmt.Sprintf("--%s=%v", "service", service.Name),
				fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()),
				fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir),
			)
			assert.NoError(s.T(), err)
			err = json.Unmarshal(cmdResult.Bytes(), &data)
			assert.NoError(s.T(), err)
			serviceTotalCount, ok := data["services"].(map[string]interface{})[service.Name].(map[string]interface{})["total"]
			assert.True(s.T(), ok)
			assert.Greater(s.T(), serviceTotalCount, float64(0))
		}
	}
}
Loading

0 comments on commit 01837ac

Please sign in to comment.