From a4f150f6101e6a446ea2d40b92e06f3c890410ae Mon Sep 17 00:00:00 2001
From: Daniel Pacak
Date: Tue, 12 May 2020 00:19:15 +0200
Subject: [PATCH] chore(cmd): Merge kubectl-starboard repository in (#4)

Signed-off-by: Daniel Pacak
---
 .github/workflows/{build.yml => build.yaml}  |   9 +
 .github/workflows/release.yaml               |  26 ++
 .gitignore                                   |   5 +-
 .goreleaser.yaml                             |  27 +++
 Makefile                                     |  10 +
 cmd/kubectl-starboard/main.go                |  35 +++
 go.mod                                       |   8 +
 go.sum                                       |  36 ++-
 kube/starboard.yaml                          | 179 ++++++++++++++
 pkg/cmd/cleanup.go                           |  31 +++
 pkg/cmd/find.go                              |  16 ++
 pkg/cmd/find_vulnerabilities.go              |  79 ++++++
 pkg/cmd/init.go                              |  31 +++
 pkg/cmd/kube_bench.go                        |  36 +++
 pkg/cmd/kube_hunter.go                       |  39 +++
 pkg/cmd/polaris.go                           |  40 +++
 pkg/cmd/rbac.go                              |  21 ++
 pkg/cmd/root.go                              |  74 ++++++
 pkg/cmd/root_test.go                         | 212 ++++++++++++++++
 pkg/docker/config.go                         |  47 ++++
 pkg/docker/config_test.go                    |  74 ++++++
 pkg/ext/clock.go                             |  34 +++
 pkg/ext/math.go                              |   8 +
 pkg/ext/math_test.go                         |  37 +++
 pkg/find/vulnerabilities/crd/writer.go       |  49 ++++
 pkg/find/vulnerabilities/scanner.go          |  12 +
 pkg/find/vulnerabilities/trivy/converter.go  |  88 +++++++
 .../vulnerabilities/trivy/converter_test.go  |   1 +
 pkg/find/vulnerabilities/trivy/model.go      |  22 ++
 pkg/find/vulnerabilities/trivy/scanner.go    | 229 ++++++++++++++++++
 pkg/find/vulnerabilities/writer.go           |  10 +
 pkg/kube/cr_manager.go                       |  83 +++++++
 pkg/kube/pod/manager.go                      | 124 ++++++++++
 pkg/kube/runnable_job.go                     |  82 +++++++
 pkg/kube/secret/manager.go                   |  82 +++++++
 pkg/kube/workload.go                         |  77 ++++++
 pkg/kubebench/crd/writer.go                  |  44 ++++
 pkg/kubebench/model.go                       |  38 +++
 pkg/kubebench/scanner.go                     | 207 ++++++++++++++++
 pkg/kubebench/writer.go                      |   9 +
 pkg/kubehunter/crd/writer.go                 |  45 ++++
 pkg/kubehunter/model.go                      |  21 ++
 pkg/kubehunter/scanner.go                    | 128 ++++++++++
 pkg/kubehunter/writer.go                     |   9 +
 pkg/polaris/converter.go                     |  93 +++++++
 pkg/polaris/converter_test.go                |  77 ++++++
 pkg/polaris/crd/writer.go                    |  61 +++++
 pkg/polaris/model.go                         |  42 ++++
 pkg/polaris/scanner.go                       | 142 +++++++++++
 pkg/polaris/test_fixture/polaris-report.json |  63 +++++
 pkg/polaris/writer.go                        |  15 ++
 pkg/runner/runner.go                         |  67 +++++
 52 files changed, 3032 insertions(+), 2 deletions(-)
 rename .github/workflows/{build.yml => build.yaml} (61%)
 create mode 100644 .github/workflows/release.yaml
 create mode 100644 .goreleaser.yaml
 create mode 100644 Makefile
 create mode 100644 cmd/kubectl-starboard/main.go
 create mode 100644 kube/starboard.yaml
 create mode 100644 pkg/cmd/cleanup.go
 create mode 100644 pkg/cmd/find.go
 create mode 100644 pkg/cmd/find_vulnerabilities.go
 create mode 100644 pkg/cmd/init.go
 create mode 100644 pkg/cmd/kube_bench.go
 create mode 100644 pkg/cmd/kube_hunter.go
 create mode 100644 pkg/cmd/polaris.go
 create mode 100644 pkg/cmd/rbac.go
 create mode 100644 pkg/cmd/root.go
 create mode 100644 pkg/cmd/root_test.go
 create mode 100644 pkg/docker/config.go
 create mode 100644 pkg/docker/config_test.go
 create mode 100644 pkg/ext/clock.go
 create mode 100644 pkg/ext/math.go
 create mode 100644 pkg/ext/math_test.go
 create mode 100644 pkg/find/vulnerabilities/crd/writer.go
 create mode 100644 pkg/find/vulnerabilities/scanner.go
 create mode 100644 pkg/find/vulnerabilities/trivy/converter.go
 create mode 100644 pkg/find/vulnerabilities/trivy/converter_test.go
 create mode 100644 pkg/find/vulnerabilities/trivy/model.go
 create mode 100644 pkg/find/vulnerabilities/trivy/scanner.go
 create mode 100644 pkg/find/vulnerabilities/writer.go
 create mode 100644 pkg/kube/cr_manager.go
 create mode 100644 pkg/kube/pod/manager.go
 create mode 100644 pkg/kube/runnable_job.go
 create mode 100644 pkg/kube/secret/manager.go
 create mode 100644 pkg/kube/workload.go
 create mode 100644 pkg/kubebench/crd/writer.go
 create mode 100644 pkg/kubebench/model.go
 create mode 100644 pkg/kubebench/scanner.go
 create mode 100644 pkg/kubebench/writer.go
 create mode 100644 pkg/kubehunter/crd/writer.go
 create mode 100644 pkg/kubehunter/model.go
 create mode 100644 pkg/kubehunter/scanner.go
 create mode 100644 pkg/kubehunter/writer.go
 create mode 100644 pkg/polaris/converter.go
 create mode 100644 pkg/polaris/converter_test.go
 create mode 100644 pkg/polaris/crd/writer.go
 create mode 100644 pkg/polaris/model.go
 create mode 100644 pkg/polaris/scanner.go
 create mode 100644 pkg/polaris/test_fixture/polaris-report.json
 create mode 100644 pkg/polaris/writer.go
 create mode 100644 pkg/runner/runner.go

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yaml
similarity index 61%
rename from .github/workflows/build.yml
rename to .github/workflows/build.yaml
index b43bec5e7..2874b1514 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yaml
@@ -19,3 +19,12 @@ jobs:
         run: go mod vendor
       - name: Verify generated code
         run: GOPATH="$(go env GOPATH)" ./hack/verify-codegen.sh
+      - name: Run unit tests
+        run: make test
+      - name: Upload code coverage
+        uses: codecov/codecov-action@v1
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          file: ./coverage.txt
+      - name: Build executable binary
+        run: make build
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
new file mode 100644
index 000000000..b5fce9b85
--- /dev/null
+++ b/.github/workflows/release.yaml
@@ -0,0 +1,26 @@
+name: release
+on:
+  push:
+    tags:
+      - "v*"
+jobs:
+  release:
+    name: Release
+    runs-on: ubuntu-18.04
+    steps:
+      - name: Setup Go
+        uses: actions/setup-go@v1
+        with:
+          go-version: 1.14
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Unshallow # This step is required for the changelog to work correctly
+        run: git fetch --prune --unshallow
+      - name: Run unit tests
+        run: make test
+      - name: Release
+        uses: goreleaser/goreleaser-action@v1
+        with:
+          args: release --rm-dist
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
index fc8e44b0c..b7a58ef2e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,6 @@
 .idea/
-vendor/
\ No newline at end of file
+bin/
+dist/
+
+coverage.txt
diff --git a/.goreleaser.yaml b/.goreleaser.yaml
new file mode 100644
index 000000000..05c66341e
--- /dev/null
+++ b/.goreleaser.yaml
@@ -0,0 +1,27 @@
+env:
+  - GO111MODULE=on
+before:
+  hooks:
+    - go mod download
+builds:
+  - id: build-kubectl-starboard
+    main: ./cmd/kubectl-starboard/main.go
+    binary: kubectl-starboard
+    env:
+      - CGO_ENABLED=0
+archives:
+  - replacements:
+      darwin: Darwin
+      linux: Linux
+      386: i386
+      amd64: x86_64
+checksum:
+  name_template: "checksums.txt"
+snapshot:
+  name_template: "{{ .FullCommit }}"
+changelog:
+  sort: asc
+  filters:
+    exclude:
+      - '^docs'
+      - '^test'
diff --git a/Makefile b/Makefile
new file mode 100644
index 000000000..e7dfe9a8b
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,10 @@
+SOURCES := $(shell find . -name '*.go')
+BINARY := kubectl-starboard
+
+build: kubectl-starboard
+
+$(BINARY): $(SOURCES)
+	CGO_ENABLED=0 go build -o ./bin/$(BINARY) ./cmd/kubectl-starboard/main.go
+
+test: $(SOURCES)
+	go test -v -short -race -timeout 30s -coverprofile=coverage.txt -covermode=atomic ./...
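The Makefile above builds the plugin binary into ./bin, and kubectl discovers plugins by the kubectl-<name> prefix on PATH, so the commands added in the Go sources below become available as "kubectl starboard ...". A minimal local workflow sketch follows; the install path and the nginx workload name are illustrative assumptions, not part of this patch:

    $ make build
    $ cp ./bin/kubectl-starboard /usr/local/bin/    # any directory on PATH works
    $ kubectl starboard init                        # create the CRDs used by starboard
    $ kubectl starboard find vulnerabilities nginx  # scan a workload with Trivy
    $ kubectl starboard cleanup                     # delete the CRDs created by init
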
diff --git a/cmd/kubectl-starboard/main.go b/cmd/kubectl-starboard/main.go
new file mode 100644
index 000000000..60d1fda7a
--- /dev/null
+++ b/cmd/kubectl-starboard/main.go
@@ -0,0 +1,35 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+
+	"github.com/spf13/pflag"
+
+	"github.com/aquasecurity/starboard/pkg/cmd"
+	"k8s.io/klog"
+)
+
+func main() {
+	defer klog.Flush()
+
+	initFlags()
+
+	if err := cmd.GetRootCmd().Execute(); err != nil {
+		fmt.Printf("error: %v\n", err)
+		os.Exit(1)
+	}
+}
+
+func initFlags() {
+	klog.InitFlags(nil)
+	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+
+	// Hide all klog flags except for -v
+	flag.CommandLine.VisitAll(func(f *flag.Flag) {
+		if f.Name != "v" {
+			pflag.Lookup(f.Name).Hidden = true
+		}
+	})
+}
diff --git a/go.mod b/go.mod
index 33867f7a2..b6f670327 100644
--- a/go.mod
+++ b/go.mod
@@ -3,8 +3,16 @@ module github.com/aquasecurity/starboard
 go 1.14
 
 require (
+	github.com/google/uuid v1.1.1
+	github.com/spf13/cobra v0.0.5
+	github.com/spf13/pflag v1.0.5
+	github.com/stretchr/testify v1.5.1
+	k8s.io/api v0.17.5
 	k8s.io/apiextensions-apiserver v0.17.5
 	k8s.io/apimachinery v0.17.5
+	k8s.io/cli-runtime v0.17.5
+	k8s.io/client-go v0.17.5
 	k8s.io/code-generator v0.17.5
+	k8s.io/klog v1.0.0
 	k8s.io/utils v0.0.0-20191114184206-e782cd3c129f
 )
diff --git a/go.sum b/go.sum
index 2e980db95..788538274 100644
--- a/go.sum
+++ b/go.sum
@@ -57,10 +57,13 @@ github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkg
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
@@ -120,14 +123,17 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
 github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
 github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= @@ -137,21 +143,28 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 
h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -171,6 +184,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -198,13 +213,17 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -227,6 +246,7 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag 
v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -240,8 +260,9 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -265,6 +286,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -296,6 +318,7 @@ golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ym golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -316,6 +339,7 @@ golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -324,6 +348,7 @@ golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -350,6 +375,7 @@ gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmK google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -362,12 +388,14 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -379,12 +407,16 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.17.5 
h1:EkVieIbn1sC8YCDwckLKLpf+LoVofXYW72+LTZWo4aQ= k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY= k8s.io/apiextensions-apiserver v0.17.5 h1:1MvO6pRopn9ZHweFEVFxnWDRpMd3ZE7SPY156qDnOeI= k8s.io/apiextensions-apiserver v0.17.5/go.mod h1:Up8qgvIy2v9521+YBhg7fhVtd4jgh/1MjotWr5GvOn4= k8s.io/apimachinery v0.17.5 h1:QAjfgeTtSGksdkgyaPrIb4lhU16FWMIzxKejYD5S0gc= k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0= k8s.io/apiserver v0.17.5/go.mod h1:yo2cFZJ7AUj6BYYRWzEzs2cLtkY6F6zdxs8GhLu5V28= +k8s.io/cli-runtime v0.17.5 h1:WIlIsuLppbKU6Oixlx1LTAz+e62wj1guQWLphhvZWZg= +k8s.io/cli-runtime v0.17.5/go.mod h1:MgU0RZdbJoDThMLacP4ik4W7qpI0wOf2uiMyzVvB/BE= +k8s.io/client-go v0.17.5 h1:Sm/9AQ415xPAX42JLKbJZnreXFgD2rVfDUDwOTm0gzA= k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo= k8s.io/code-generator v0.17.5 h1:JKh5hYOFb0cTls9mce3ZC4DWh01/nLEgqj8OSJBpVRw= k8s.io/code-generator v0.17.5/go.mod h1:qdiSCSTKtS+3WtPelj2h57fylSQcPUlhMVm+TD9Dvqc= @@ -405,6 +437,8 @@ modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= +sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/kube/starboard.yaml b/kube/starboard.yaml new file mode 100644 index 000000000..2ce65ac32 --- /dev/null +++ b/kube/starboard.yaml @@ -0,0 +1,179 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: starboard +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: starboard + namespace: starboard +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: starboard +rules: + - apiGroups: + - apps + - batch + - "" + resources: + - deployments + - statefulsets + - daemonsets + - jobs + - cronjobs + - replicationcontrollers + - nodes + - namespaces + - pods + verbs: + - list + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: starboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: starboard +subjects: + - kind: ServiceAccount + name: starboard + namespace: starboard +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: polaris-config + namespace: starboard +data: + config.yaml: | + checks: + # resources + cpuRequestsMissing: warning + cpuLimitsMissing: warning + memoryRequestsMissing: warning + memoryLimitsMissing: warning + # images + tagNotSpecified: error + pullPolicyNotAlways: ignore + # healthChecks + readinessProbeMissing: warning + livenessProbeMissing: warning + # networking + hostNetworkSet: warning + hostPortSet: warning + # security + hostIPCSet: error + hostPIDSet: error + notReadOnlyRootFileSystem: warning + privilegeEscalationAllowed: error + runAsRootAllowed: warning + runAsPrivileged: error + dangerousCapabilities: error + insecureCapabilities: warning + controllersToScan: + - Deployments + - StatefulSets + - DaemonSets + - CronJobs + - Jobs + - ReplicationControllers + exemptions: + 
- controllerNames: + - dns-controller + - datadog-datadog + - kube-flannel-ds + - kube2iam + - aws-iam-authenticator + - datadog + - kube2iam + rules: + - hostNetworkSet + - controllerNames: + - aws-iam-authenticator + - aws-cluster-autoscaler + - kube-state-metrics + - dns-controller + - external-dns + - dnsmasq + - autoscaler + - kubernetes-dashboard + - install-cni + - kube2iam + rules: + - readinessProbeMissing + - livenessProbeMissing + - controllerNames: + - aws-iam-authenticator + - nginx-ingress-controller + - nginx-ingress-default-backend + - aws-cluster-autoscaler + - kube-state-metrics + - dns-controller + - external-dns + - kubedns + - dnsmasq + - autoscaler + - tiller + - kube2iam + rules: + - runAsRootAllowed + - controllerNames: + - aws-iam-authenticator + - nginx-ingress-controller + - nginx-ingress-default-backend + - aws-cluster-autoscaler + - kube-state-metrics + - dns-controller + - external-dns + - kubedns + - dnsmasq + - autoscaler + - tiller + - kube2iam + rules: + - notReadOnlyRootFileSystem + - controllerNames: + - cert-manager + - dns-controller + - kubedns + - dnsmasq + - autoscaler + - insights-agent-goldilocks-vpa-install + rules: + - cpuRequestsMissing + - cpuLimitsMissing + - memoryRequestsMissing + - memoryLimitsMissing + - controllerNames: + - kube2iam + - kube-flannel-ds + rules: + - runAsPrivileged + - controllerNames: + - kube-hunter + rules: + - hostPIDSet + - controllerNames: + - polaris + - kube-hunter + - goldilocks + - insights-agent-goldilocks-vpa-install + rules: + - notReadOnlyRootFileSystem + - controllerNames: + - insights-agent-goldilocks-controller + rules: + - livenessProbeMissing + - readinessProbeMissing + - controllerNames: + - insights-agent-goldilocks-vpa-install + - kube-hunter + rules: + - runAsRootAllowed diff --git a/pkg/cmd/cleanup.go b/pkg/cmd/cleanup.go new file mode 100644 index 000000000..bc908bd50 --- /dev/null +++ b/pkg/cmd/cleanup.go @@ -0,0 +1,31 @@ +package cmd + +import ( + "github.com/aquasecurity/starboard/pkg/kube" + "github.com/spf13/cobra" + extapi "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +func GetCleanupCmd(cf *genericclioptions.ConfigFlags) *cobra.Command { + cmd := &cobra.Command{ + Use: "cleanup", + Short: "Delete custom resource definitions created by starboard", + RunE: func(cmd *cobra.Command, args []string) (err error) { + config, err := cf.ToRESTConfig() + if err != nil { + return + } + client, err := extapi.NewForConfig(config) + if err != nil { + return + } + crm, err := kube.NewCRManager(client) + if err != nil { + return + } + return crm.Cleanup() + }, + } + return cmd +} diff --git a/pkg/cmd/find.go b/pkg/cmd/find.go new file mode 100644 index 000000000..b649dd54d --- /dev/null +++ b/pkg/cmd/find.go @@ -0,0 +1,16 @@ +package cmd + +import ( + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +func GetFindCmd(cf *genericclioptions.ConfigFlags) *cobra.Command { + findCmd := &cobra.Command{ + Use: "find", + Short: "Manage security scanners", + } + findCmd.AddCommand(GetVulnerabilitiesCmd(cf)) + + return findCmd +} diff --git a/pkg/cmd/find_vulnerabilities.go b/pkg/cmd/find_vulnerabilities.go new file mode 100644 index 000000000..d668cffcf --- /dev/null +++ b/pkg/cmd/find_vulnerabilities.go @@ -0,0 +1,79 @@ +package cmd + +import ( + "github.com/aquasecurity/starboard/pkg/find/vulnerabilities/crd" + "github.com/aquasecurity/starboard/pkg/find/vulnerabilities/trivy" + secapi 
"github.com/aquasecurity/starboard/pkg/generated/clientset/versioned" + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/kubernetes" +) + +func GetVulnerabilitiesCmd(cf *genericclioptions.ConfigFlags) *cobra.Command { + cmd := &cobra.Command{ + Aliases: []string{"vulns", "vuln"}, + Use: "vulnerabilities (NAME | TYPE/NAME)", + Short: "Scan a given workload for vulnerabilities using Trivy scanner", + Long: `Scan a given workload for vulnerabilities using Trivy scanner + +TYPE is a Kubernetes workload. Shortcuts and API groups will be resolved, e.g. 'po' or 'deployments.apps'. +NAME is the name of a particular Kubernetes workload. +`, + Example: ` # Scan a pod with the specified name + kubectl starboard find vulnerabilities nginx + + # Scan a pod with the specified name in the specified namespace + kubectl starboard find vulns po/nginx -n staging + + # Scan a replicaset with the specified name + kubectl starboard find vuln replicaset/nginx + + # Scan a replicationcontroller with the given name + kubectl starboard find vulns rc/nginx + + # Scan a deployment with the specified name + kubectl starboard find vulns deployments.apps/nginx + + # Scan a daemonset with the specified name + kubectl starboard find vulns daemonsets/nginx + + # Scan a statefulset with the specified name + kubectl starboard find vulns sts/redis + + # Scan a job with the specified name + kubectl starboard find vulns job/my-job + + # Scan a cronjob with the specified name + kubectl starboard find vulns cj/my-cronjob`, + RunE: func(cmd *cobra.Command, args []string) (err error) { + ns, _, err := cf.ToRawKubeConfigLoader().Namespace() + if err != nil { + return + } + workload, err := WorkloadFromArgs(ns, args) + if err != nil { + return + } + config, err := cf.ToRESTConfig() + if err != nil { + return + } + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return err + } + reports, err := trivy.NewScanner(clientset).Scan(workload) + if err != nil { + return + } + secClientset, err := secapi.NewForConfig(config) + if err != nil { + return + } + err = crd.NewWriter(secClientset).Write(workload, reports) + return + }, + } + + return cmd +} diff --git a/pkg/cmd/init.go b/pkg/cmd/init.go new file mode 100644 index 000000000..2fbbc23cf --- /dev/null +++ b/pkg/cmd/init.go @@ -0,0 +1,31 @@ +package cmd + +import ( + "github.com/aquasecurity/starboard/pkg/kube" + "github.com/spf13/cobra" + extapi "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +func GetInitCmd(cf *genericclioptions.ConfigFlags) *cobra.Command { + cmd := &cobra.Command{ + Use: "init", + Short: "Create custom resource definitions used by starboard", + RunE: func(cmd *cobra.Command, args []string) (err error) { + config, err := cf.ToRESTConfig() + if err != nil { + return + } + client, err := extapi.NewForConfig(config) + if err != nil { + return + } + crm, err := kube.NewCRManager(client) + if err != nil { + return + } + return crm.Init() + }, + } + return cmd +} diff --git a/pkg/cmd/kube_bench.go b/pkg/cmd/kube_bench.go new file mode 100644 index 000000000..92cf44447 --- /dev/null +++ b/pkg/cmd/kube_bench.go @@ -0,0 +1,36 @@ +package cmd + +import ( + "github.com/aquasecurity/starboard/pkg/kubebench" + "github.com/aquasecurity/starboard/pkg/kubebench/crd" + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +func GetKubeBenchCmd(cf *genericclioptions.ConfigFlags) *cobra.Command { + cmd := 
&cobra.Command{ + Use: "kube-bench", + Short: "Run the CIS Kubernetes Benchmark https://www.cisecurity.org/benchmark/kubernetes", + RunE: func(cmd *cobra.Command, args []string) (err error) { + config, err := cf.ToRESTConfig() + if err != nil { + return + } + scanner, err := kubebench.NewScanner(config) + if err != nil { + return + } + report, node, err := scanner.Scan() + if err != nil { + return + } + writer, err := crd.NewWriter(config) + if err != nil { + return + } + err = writer.Write(report, node) + return + }, + } + return cmd +} diff --git a/pkg/cmd/kube_hunter.go b/pkg/cmd/kube_hunter.go new file mode 100644 index 000000000..a5237247a --- /dev/null +++ b/pkg/cmd/kube_hunter.go @@ -0,0 +1,39 @@ +package cmd + +import ( + "github.com/aquasecurity/starboard/pkg/kubehunter" + "github.com/aquasecurity/starboard/pkg/kubehunter/crd" + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +func GetKubeHunterCmd(cf *genericclioptions.ConfigFlags) *cobra.Command { + cmd := &cobra.Command{ + Use: "kube-hunter", + Short: "Hunt for security weaknesses", + RunE: func(cmd *cobra.Command, args []string) (err error) { + config, err := cf.ToRESTConfig() + if err != nil { + return + } + scanner, err := kubehunter.NewScanner(config) + if err != nil { + return + } + report, err := scanner.Scan() + if err != nil { + return + } + writer, err := crd.NewWriter(config) + if err != nil { + return + } + err = writer.Write(report, "cluster") + if err != nil { + return + } + return + }, + } + return cmd +} diff --git a/pkg/cmd/polaris.go b/pkg/cmd/polaris.go new file mode 100644 index 000000000..306034dfd --- /dev/null +++ b/pkg/cmd/polaris.go @@ -0,0 +1,40 @@ +package cmd + +import ( + secapi "github.com/aquasecurity/starboard/pkg/generated/clientset/versioned" + "github.com/aquasecurity/starboard/pkg/polaris" + "github.com/aquasecurity/starboard/pkg/polaris/crd" + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +func GetPolarisCmd(cf *genericclioptions.ConfigFlags) *cobra.Command { + cmd := &cobra.Command{ + Use: "polaris", + Short: "Run a variety of checks to ensure that Kubernetes pods and controllers are configured using best practices", + RunE: func(cmd *cobra.Command, args []string) (err error) { + config, err := cf.ToRESTConfig() + if err != nil { + return + } + scanner, err := polaris.NewScanner(config) + if err != nil { + return + } + reports, err := scanner.Scan() + if err != nil { + return + } + secClientset, err := secapi.NewForConfig(config) + if err != nil { + return + } + err = crd.NewWriter(secClientset).WriteAll(reports) + if err != nil { + return + } + return + }, + } + return cmd +} diff --git a/pkg/cmd/rbac.go b/pkg/cmd/rbac.go new file mode 100644 index 000000000..41d041af9 --- /dev/null +++ b/pkg/cmd/rbac.go @@ -0,0 +1,21 @@ +package cmd + +import ( + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +func GetRBACCmd(cf *genericclioptions.ConfigFlags) *cobra.Command { + cmd := &cobra.Command{ + Use: "rbac", + Short: "Get RBAC config to run starboard", + RunE: func(cmd *cobra.Command, args []string) (err error) { + _, err = cf.ToRESTConfig() + if err != nil { + return + } + return + }, + } + return cmd +} diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go new file mode 100644 index 000000000..4f1dc42c7 --- /dev/null +++ b/pkg/cmd/root.go @@ -0,0 +1,74 @@ +package cmd + +import ( + "errors" + "strings" + + "github.com/aquasecurity/starboard/pkg/kube" + + "github.com/spf13/cobra" + 
"k8s.io/cli-runtime/pkg/genericclioptions" +) + +func GetRootCmd() *cobra.Command { + var cf *genericclioptions.ConfigFlags + + rootCmd := &cobra.Command{ + Use: "starboard", + Short: "Kubernetes-native security", + SilenceErrors: true, + SilenceUsage: true, + } + + cf = genericclioptions.NewConfigFlags(true) + + rootCmd.AddCommand(GetInitCmd(cf)) + rootCmd.AddCommand(GetRBACCmd(cf)) + rootCmd.AddCommand(GetFindCmd(cf)) + rootCmd.AddCommand(GetKubeBenchCmd(cf)) + rootCmd.AddCommand(GetKubeHunterCmd(cf)) + rootCmd.AddCommand(GetPolarisCmd(cf)) + rootCmd.AddCommand(GetCleanupCmd(cf)) + + SetFlags(cf, rootCmd) + + return rootCmd +} + +func SetFlags(cf *genericclioptions.ConfigFlags, cmd *cobra.Command) { + cf.AddFlags(cmd.Flags()) + for _, c := range cmd.Commands() { + SetFlags(cf, c) + } +} + +func WorkloadFromArgs(namespace string, args []string) (workload kube.Workload, err error) { + if len(args) < 1 { + err = errors.New("required workload kind and name not specified") + return + } + + parts := strings.SplitN(args[0], "/", 2) + if len(parts) == 1 { + workload = kube.Workload{ + Namespace: namespace, + Kind: kube.WorkloadKindPod, + Name: parts[0], + } + return + } + kind, err := kube.WorkloadKindFromString(parts[0]) + if err != nil { + return + } + if "" == parts[1] { + err = errors.New("required workload name is blank") + return + } + workload = kube.Workload{ + Namespace: namespace, + Kind: kind, + Name: parts[1], + } + return +} diff --git a/pkg/cmd/root_test.go b/pkg/cmd/root_test.go new file mode 100644 index 000000000..deab3cd81 --- /dev/null +++ b/pkg/cmd/root_test.go @@ -0,0 +1,212 @@ +package cmd + +import ( + "errors" + "testing" + + "github.com/aquasecurity/starboard/pkg/kube" + "github.com/stretchr/testify/assert" +) + +func TestWorkloadFromArgs(t *testing.T) { + + testCases := []struct { + name string + + givenArgs []string + + expectedWorkload kube.Workload + expectedError error + }{ + { + name: "Should return Pod/my-pod when kind is not explicitly specified", + givenArgs: []string{"my-pod"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindPod, Name: "my-pod"}, + }, + { + name: "Should return Pod/my-pod when kind is specified as pods", + givenArgs: []string{"pods/my-pod"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindPod, Name: "my-pod"}, + }, + { + name: "Should return Pod/my-pod when kind is specified as pod", + givenArgs: []string{"pod/my-pod"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindPod, Name: "my-pod"}, + }, + { + name: "Should return Pod/my-pod when kind is specified as po", + givenArgs: []string{"po/my-pod"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindPod, Name: "my-pod"}, + }, + { + name: "Should return ReplicaSet/my-rs when kind is specified as replicasets.apps", + givenArgs: []string{"replicasets.apps/my-rs"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindReplicaSet, Name: "my-rs"}, + }, + { + name: "Should return ReplicaSet/my-rs when kind is specified as replicasets", + givenArgs: []string{"replicasets/my-rs"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindReplicaSet, Name: "my-rs"}, + }, + { + name: "Should return ReplicaSet/my-rs when kind is specified as replicaset", + givenArgs: []string{"replicaset/my-rs"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindReplicaSet, Name: "my-rs"}, + }, + { + name: "Should return ReplicaSet/my-rs when kind is specified as rs", + givenArgs: []string{"rs/my-rs"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindReplicaSet, Name: 
"my-rs"}, + }, + { + name: "Should return ReplicationController/my-rc when kind is specified as replicationcontrollers", + givenArgs: []string{"replicationcontrollers/my-rc"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindReplicationController, Name: "my-rc"}, + }, + { + name: "Should return ReplicationController/my-rc when kind is specified as replicationcontroller", + givenArgs: []string{"replicationcontroller/my-rc"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindReplicationController, Name: "my-rc"}, + }, + { + name: "Should return ReplicationController/my-rc when kind is specified as rc", + givenArgs: []string{"rc/my-rc"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindReplicationController, Name: "my-rc"}, + }, + { + name: "Should return Deployment/my-deployment when kind is specified as deployments.apps", + givenArgs: []string{"deploy/my-deployment"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindDeployment, Name: "my-deployment"}, + }, + { + name: "Should return Deployment/my-deployment when kind is specified as deployments", + givenArgs: []string{"deployments/my-deployment"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindDeployment, Name: "my-deployment"}, + }, + { + name: "Should return Deployment/my-deployment when kind is specified as deployment", + givenArgs: []string{"deployment/my-deployment"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindDeployment, Name: "my-deployment"}, + }, + { + name: "Should return Deployment/my-deployment when kind is specified as deploy", + givenArgs: []string{"deploy/my-deployment"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindDeployment, Name: "my-deployment"}, + }, + { + name: "Should return DaemonSet/my-ds when kind is specified as daemonsets.apps", + givenArgs: []string{"daemonsets/my-ds"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindDaemonSet, Name: "my-ds"}, + }, + { + name: "Should return DaemonSet/my-ds when kind is specified as daemonsets", + givenArgs: []string{"daemonsets/my-ds"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindDaemonSet, Name: "my-ds"}, + }, + { + name: "Should return DaemonSet/my-ds when kind is specified as daemonset", + givenArgs: []string{"daemonsets/my-ds"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindDaemonSet, Name: "my-ds"}, + }, + { + name: "Should return DaemonSet/my-ds when kind is specified as ds", + givenArgs: []string{"daemonsets/my-ds"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindDaemonSet, Name: "my-ds"}, + }, + { + name: "Should return StatefulSet/my-sts when kind is specified as statefulsets.apps", + givenArgs: []string{"statefulsets.apps/my-sts"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindStatefulSet, Name: "my-sts"}, + }, + { + name: "Should return StatefulSet/my-sts when kind is specified as statefulsets", + givenArgs: []string{"statefulsets/my-sts"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindStatefulSet, Name: "my-sts"}, + }, + { + name: "Should return StatefulSet/my-sts when kind is specified as statefulset", + givenArgs: []string{"statefulset/my-sts"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindStatefulSet, Name: "my-sts"}, + }, + { + name: "Should return StatefulSet/my-sts when kind is specified as sts", + givenArgs: []string{"sts/my-sts"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindStatefulSet, Name: "my-sts"}, + }, + { + name: "Should return CronJob/my-cj when kind is specified as cronjobs.batch", + givenArgs: 
[]string{"cronjobs.batch/my-cj"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindCronJob, Name: "my-cj"}, + }, + { + name: "Should return CronJob/my-cj when kind is specified as cronjob.batch", + givenArgs: []string{"cronjob.batch/my-cj"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindCronJob, Name: "my-cj"}, + }, + { + name: "Should return CronJob/my-cj when kind is specified as cronjobs", + givenArgs: []string{"cronjobs/my-cj"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindCronJob, Name: "my-cj"}, + }, + { + name: "Should return CronJob/my-cj when kind is specified as cronjob", + givenArgs: []string{"cronjob/my-cj"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindCronJob, Name: "my-cj"}, + }, + { + name: "Should return CronJob/my-cj when kind is specified as cj", + givenArgs: []string{"cj/my-cj"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindCronJob, Name: "my-cj"}, + }, + { + name: "Should return Job/my-job when kind is specified as jobs.batch", + givenArgs: []string{"jobs.batch/my-job"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindJob, Name: "my-job"}, + }, + { + name: "Should return Job/my-job when kind is specified as job.batch", + givenArgs: []string{"job.batch/my-job"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindJob, Name: "my-job"}, + }, + { + name: "Should return Job/my-job when kind is specified as jobs", + givenArgs: []string{"jobs/my-job"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindJob, Name: "my-job"}, + }, + { + name: "Should return Job/my-job when kind is specified as job", + givenArgs: []string{"job/my-job"}, + expectedWorkload: kube.Workload{Kind: kube.WorkloadKindJob, Name: "my-job"}, + }, + { + name: "Should return error when neither workload kind nor name is specified", + givenArgs: []string{}, + expectedWorkload: kube.Workload{}, + expectedError: errors.New("required workload kind and name not specified"), + }, + { + name: "Should return error when kind is unrecognized", + givenArgs: []string{"xpod/my-pod"}, + expectedWorkload: kube.Workload{}, + expectedError: errors.New("unrecognized workload: xpod"), + }, + { + name: "Should return error when workload name is blank", + givenArgs: []string{"pod/"}, + expectedError: errors.New("required workload name is blank"), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + workload, err := WorkloadFromArgs("", tc.givenArgs) + switch { + case tc.expectedError != nil: + assert.EqualError(t, err, tc.expectedError.Error()) + default: + assert.Equal(t, tc.expectedWorkload, workload) + } + }) + } + +} diff --git a/pkg/docker/config.go b/pkg/docker/config.go new file mode 100644 index 000000000..44c7e8f10 --- /dev/null +++ b/pkg/docker/config.go @@ -0,0 +1,47 @@ +package docker + +import ( + "encoding/base64" + "encoding/json" + "strings" +) + +// ServerCredentials represent credentials used to login to a Docker server. +type ServerCredentials struct { + Auth string `json:"auth"` + Username string `json:"username"` + Password string `json:"password"` +} + +// Credentials represents Docker credentials which are typically stored in `~/.docker/config.json`. 
+type Credentials struct { + Auths map[string]ServerCredentials `json:"auths"` +} + +func ReadCredentialsFromBytes(contents []byte) (cfg map[string]ServerCredentials, err error) { + var credentials Credentials + if err = json.Unmarshal(contents, &credentials); err != nil { + return nil, err + } + return encodeAuth(credentials.Auths) +} + +func encodeAuth(config map[string]ServerCredentials) (encodedConfig map[string]ServerCredentials, err error) { + encodedConfig = make(map[string]ServerCredentials) + for server, entry := range config { + var decodedAuth []byte + decodedAuth, err = base64.StdEncoding.DecodeString(entry.Auth) + if err != nil { + return + } + splitDecodedAuth := strings.Split(string(decodedAuth), ":") + + encodedConfig[server] = ServerCredentials{ + Auth: entry.Auth, + Username: splitDecodedAuth[0], + Password: splitDecodedAuth[1], + } + + } + return +} diff --git a/pkg/docker/config_test.go b/pkg/docker/config_test.go new file mode 100644 index 000000000..6d383c29d --- /dev/null +++ b/pkg/docker/config_test.go @@ -0,0 +1,74 @@ +package docker + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestReadCredentialsFromBytes(t *testing.T) { + testCases := []struct { + name string + + givenJSON string + + expectedCredentials map[string]ServerCredentials + expectedError error + }{ + { + name: "Should return empty credentials when content is empty JSON object", + givenJSON: "{}", + expectedCredentials: map[string]ServerCredentials{}, + }, + { + name: "Should return empty credentials when content is null JSON", + givenJSON: "null", + expectedCredentials: map[string]ServerCredentials{}, + }, + { + name: "Should return error when content is blank", + givenJSON: "", + expectedError: errors.New("unexpected end of JSON input"), + }, + { + name: "Should return server credentials with encoded username and password", + givenJSON: `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "ZG9ja2VyOmh1Yg==" + }, + "harbor.domain": { + "auth": "YWRtaW46SGFyYm9yMTIzNDU=" + } + } +}`, + expectedCredentials: map[string]ServerCredentials{ + "harbor.domain": { + Auth: "YWRtaW46SGFyYm9yMTIzNDU=", + Username: "admin", + Password: "Harbor12345", + }, + "https://index.docker.io/v1/": { + Auth: "ZG9ja2VyOmh1Yg==", + Username: "docker", + Password: "hub", + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + credentials, err := ReadCredentialsFromBytes([]byte(tc.givenJSON)) + switch { + case tc.expectedError != nil: + assert.EqualError(t, err, tc.expectedError.Error()) + default: + assert.NoError(t, err) + assert.Equal(t, tc.expectedCredentials, credentials) + } + }) + + } +} diff --git a/pkg/ext/clock.go b/pkg/ext/clock.go new file mode 100644 index 000000000..05677b4bb --- /dev/null +++ b/pkg/ext/clock.go @@ -0,0 +1,34 @@ +package ext + +import "time" + +// Clock wraps the Now method. Introduced to allow replacing the global state with fixed clocks to facilitate testing. +// Now returns the current time. 
+type Clock interface { + Now() time.Time +} + +type systemClock struct { +} + +func (c *systemClock) Now() time.Time { + return time.Now() +} + +func NewSystemClock() Clock { + return &systemClock{} +} + +type fixedClock struct { + fixedTime time.Time +} + +func (c *fixedClock) Now() time.Time { + return c.fixedTime +} + +func NewFixedClock(fixedTime time.Time) Clock { + return &fixedClock{ + fixedTime: fixedTime, + } +} diff --git a/pkg/ext/math.go b/pkg/ext/math.go new file mode 100644 index 000000000..e57e6d8bb --- /dev/null +++ b/pkg/ext/math.go @@ -0,0 +1,8 @@ +package ext + +func MinInt(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/pkg/ext/math_test.go b/pkg/ext/math_test.go new file mode 100644 index 000000000..2536aa524 --- /dev/null +++ b/pkg/ext/math_test.go @@ -0,0 +1,37 @@ +package ext + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMinInt(t *testing.T) { + testCases := []struct { + a int + b int + result int + }{ + { + a: 2, + b: 3, + result: 2, + }, + { + a: 5, + b: 4, + result: 4, + }, + { + a: 4, + b: 4, + result: 4, + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("(%d, %d): %d", tc.a, tc.b, tc.result), func(t *testing.T) { + assert.Equal(t, tc.result, MinInt(tc.a, tc.b)) + }) + } +} diff --git a/pkg/find/vulnerabilities/crd/writer.go b/pkg/find/vulnerabilities/crd/writer.go new file mode 100644 index 000000000..dc6612978 --- /dev/null +++ b/pkg/find/vulnerabilities/crd/writer.go @@ -0,0 +1,49 @@ +package crd + +import ( + "fmt" + + "github.com/aquasecurity/starboard/pkg/kube" + + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + "github.com/aquasecurity/starboard/pkg/find/vulnerabilities" + clientset "github.com/aquasecurity/starboard/pkg/generated/clientset/versioned" + "github.com/google/uuid" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type writer struct { + client clientset.Interface +} + +func NewWriter(client clientset.Interface) vulnerabilities.Writer { + return &writer{ + client: client, + } +} + +func (s *writer) Write(workload kube.Workload, reports map[string]sec.VulnerabilityReport) (err error) { + for container, report := range reports { + err = s.createVulnerability(workload, container, report) + if err != nil { + return + } + } + return +} + +func (s *writer) createVulnerability(workload kube.Workload, container string, report sec.VulnerabilityReport) (err error) { + _, err = s.client.AquasecurityV1alpha1().Vulnerabilities(workload.Namespace).Create(&sec.Vulnerability{ + ObjectMeta: meta.ObjectMeta{ + Name: fmt.Sprintf(uuid.New().String()), + Labels: map[string]string{ + kube.LabelWorkloadKind: workload.Kind.String(), + kube.LabelWorkloadName: workload.Name, + kube.LabelContainerName: container, + }, + }, + Report: report, + }) + + return err +} diff --git a/pkg/find/vulnerabilities/scanner.go b/pkg/find/vulnerabilities/scanner.go new file mode 100644 index 000000000..6fc8a1339 --- /dev/null +++ b/pkg/find/vulnerabilities/scanner.go @@ -0,0 +1,12 @@ +package vulnerabilities + +import ( + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + "github.com/aquasecurity/starboard/pkg/kube" + core "k8s.io/api/core/v1" +) + +type Scanner interface { + Scan(workload kube.Workload) (reports map[string]sec.VulnerabilityReport, err error) + ScanByPodSpec(workload kube.Workload, spec core.PodSpec) (reports map[string]sec.VulnerabilityReport, err error) +} diff --git a/pkg/find/vulnerabilities/trivy/converter.go 
b/pkg/find/vulnerabilities/trivy/converter.go new file mode 100644 index 000000000..93e94ef5c --- /dev/null +++ b/pkg/find/vulnerabilities/trivy/converter.go @@ -0,0 +1,88 @@ +package trivy + +import ( + "encoding/json" + "io" + "time" + + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Converter is the interface that wraps the Convert method. +// +// Convert converts the vulnerabilities model used by Trivy +// to a generic model defined by K8S-native security CRDs. +type Converter interface { + Convert(reader io.Reader) (sec.VulnerabilityReport, error) +} + +type converter struct { +} + +var DefaultConverter Converter = &converter{} + +func (c *converter) Convert(reader io.Reader) (report sec.VulnerabilityReport, err error) { + var scanReports []ScanReport + err = json.NewDecoder(reader).Decode(&scanReports) + if err != nil { + return + } + report = c.convert(scanReports) + return +} + +func (c *converter) convert(reports []ScanReport) sec.VulnerabilityReport { + var vulnerabilities []sec.VulnerabilityItem + + // TODO There might be > 1 item in the slice of reports (for app dependencies) + for _, sr := range reports[0].Vulnerabilities { + vulnerabilities = append(vulnerabilities, sec.VulnerabilityItem{ + VulnerabilityID: sr.VulnerabilityID, + Resource: sr.PkgName, + InstalledVersion: sr.InstalledVersion, + FixedVersion: sr.FixedVersion, + Severity: sr.Severity, + LayerID: sr.LayerID, + Title: sr.Title, + Description: sr.Description, + Links: c.toLinks(sr.References), + }) + } + + return sec.VulnerabilityReport{ + GeneratedAt: meta.NewTime(time.Now()), + Scanner: sec.Scanner{ + Name: "Trivy", + Vendor: "Aqua Security", + Version: "latest", + }, + Summary: c.toSummary(vulnerabilities), + Vulnerabilities: vulnerabilities, + } +} + +func (c *converter) toLinks(references []string) []string { + if references == nil { + return []string{} + } + return references +} + +func (c *converter) toSummary(vulnerabilities []sec.VulnerabilityItem) (vs sec.VulnerabilitySummary) { + for _, v := range vulnerabilities { + switch v.Severity { + case sec.SeverityCritical: + vs.CriticalCount++ + case sec.SeverityHigh: + vs.HighCount++ + case sec.SeverityMedium: + vs.MediumCount++ + case sec.SeverityLow: + vs.LowCount++ + default: + vs.UnknownCount++ + } + } + return +} diff --git a/pkg/find/vulnerabilities/trivy/converter_test.go b/pkg/find/vulnerabilities/trivy/converter_test.go new file mode 100644 index 000000000..cae903981 --- /dev/null +++ b/pkg/find/vulnerabilities/trivy/converter_test.go @@ -0,0 +1 @@ +package trivy diff --git a/pkg/find/vulnerabilities/trivy/model.go b/pkg/find/vulnerabilities/trivy/model.go new file mode 100644 index 000000000..ad39847ba --- /dev/null +++ b/pkg/find/vulnerabilities/trivy/model.go @@ -0,0 +1,22 @@ +package trivy + +import ( + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" +) + +type ScanReport struct { + Target string `json:"Target"` + Vulnerabilities []Vulnerability `json:"Vulnerabilities"` +} + +type Vulnerability struct { + VulnerabilityID string `json:"VulnerabilityID"` + PkgName string `json:"PkgName"` + InstalledVersion string `json:"InstalledVersion"` + FixedVersion string `json:"FixedVersion"` + Title string `json:"Title"` + Description string `json:"Description"` + Severity sec.Severity `json:"Severity"` + LayerID string `json:"LayerID"` + References []string `json:"References"` +} diff --git a/pkg/find/vulnerabilities/trivy/scanner.go 
b/pkg/find/vulnerabilities/trivy/scanner.go new file mode 100644 index 000000000..f5b69632e --- /dev/null +++ b/pkg/find/vulnerabilities/trivy/scanner.go @@ -0,0 +1,229 @@ +package trivy + +import ( + "fmt" + "github.com/aquasecurity/starboard/pkg/ext" + "io" + "time" + + "k8s.io/klog" + + "github.com/aquasecurity/starboard/pkg/kube" + "github.com/aquasecurity/starboard/pkg/runner" + + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + "github.com/aquasecurity/starboard/pkg/find/vulnerabilities" + "github.com/aquasecurity/starboard/pkg/kube/pod" + "github.com/aquasecurity/starboard/pkg/kube/secret" + "github.com/google/uuid" + batch "k8s.io/api/batch/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/utils/pointer" +) + +const ( + kubernetesNameMaxLength int = 63 +) + +const ( + trivyImageRef = "docker.io/aquasec/trivy:latest" +) + +var ( + scanJobRunnerTimeout = 60 * time.Second +) + +type scanner struct { + clientset kubernetes.Interface + pods *pod.Manager + secrets *secret.Manager + converter Converter +} + +func NewScanner(clientset kubernetes.Interface) vulnerabilities.Scanner { + return &scanner{ + clientset: clientset, + pods: pod.NewPodManager(clientset), + secrets: secret.NewSecretManager(clientset), + converter: DefaultConverter, + } +} + +func (s *scanner) Scan(workload kube.Workload) (reports map[string]sec.VulnerabilityReport, err error) { + klog.V(3).Infof("Getting Pod template for workload: %v", workload) + podSpec, err := s.pods.GetPodSpecByWorkload(workload) + if err != nil { + err = fmt.Errorf("getting Pod template: %w", err) + return + } + + reports, err = s.ScanByPodSpec(workload, podSpec) + if err != nil { + return + } + return +} + +func (s *scanner) ScanByPodSpec(workload kube.Workload, spec core.PodSpec) (map[string]sec.VulnerabilityReport, error) { + job, err := s.prepareJob(workload, spec) + if err != nil { + return nil, fmt.Errorf("preparing scan job: %w", err) + } + + err = runner.New(scanJobRunnerTimeout). 
+ Run(kube.NewRunnableJob(s.clientset, job)) + if err != nil { + return nil, fmt.Errorf("running scan job: %w", err) + } + + defer func() { + klog.V(3).Infof("Deleting job: %s/%s", job.Namespace, job.Name) + background := meta.DeletePropagationBackground + _ = s.clientset.BatchV1().Jobs(job.Namespace).Delete(job.Name, &meta.DeleteOptions{ + PropagationPolicy: &background, + }) + }() + + klog.V(3).Infof("Scan job completed: %s/%s", job.Namespace, job.Name) + + job, err = s.clientset.BatchV1().Jobs(job.Namespace).Get(job.Name, meta.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("getting scan job: %w", err) + } + + return s.getScanReportsFor(job) +} + +func (s *scanner) prepareJob(workload kube.Workload, spec core.PodSpec) (*batch.Job, error) { + credentials, err := s.secrets.GetImagesWithCredentials(workload.Namespace, spec) + if err != nil { + return nil, fmt.Errorf("getting docker configs: %w", err) + } + + jobName := fmt.Sprintf(uuid.New().String()) + jobName = jobName[:ext.MinInt(len(jobName), kubernetesNameMaxLength)] + + initContainers := []core.Container{ + { + Name: jobName, + Image: trivyImageRef, + ImagePullPolicy: core.PullAlways, + Command: []string{ + "trivy", + }, + Args: []string{ + "--download-db-only", + "--cache-dir", + "/var/lib/trivy", + }, + VolumeMounts: []core.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/var/lib/trivy", + }, + }, + }, + } + + scanJobContainers := make([]core.Container, len(spec.Containers)) + for i, c := range spec.Containers { + var envs []core.EnvVar + if dockerConfig, ok := credentials[c.Image]; ok { + envs = append(envs, core.EnvVar{ + Name: "TRIVY_USERNAME", + Value: dockerConfig.Username, + }, core.EnvVar{ + Name: "TRIVY_PASSWORD", + Value: dockerConfig.Password, + }) + } + + scanJobContainers[i] = core.Container{ + Name: c.Name, + Image: trivyImageRef, + ImagePullPolicy: core.PullAlways, + Env: envs, + Command: []string{ + "trivy", + }, + Args: []string{ + "--skip-update", + "--cache-dir", + "/var/lib/trivy", + "--no-progress", + "--quiet", + "--format", + "json", + c.Image, + }, + VolumeMounts: []core.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/var/lib/trivy", + }, + }, + } + } + + return &batch.Job{ + ObjectMeta: meta.ObjectMeta{ + Name: jobName, + Namespace: workload.Namespace, + Labels: map[string]string{ + kube.LabelWorkloadKind: workload.Kind.String(), + kube.LabelWorkloadName: workload.Name, + }, + }, + Spec: batch.JobSpec{ + BackoffLimit: pointer.Int32Ptr(1), + Completions: pointer.Int32Ptr(1), + ActiveDeadlineSeconds: pointer.Int64Ptr(int64(scanJobRunnerTimeout.Seconds())), + Template: core.PodTemplateSpec{ + ObjectMeta: meta.ObjectMeta{ + Labels: map[string]string{ + kube.LabelWorkloadKind: workload.Kind.String(), + kube.LabelWorkloadName: workload.Name, + }, + }, + Spec: core.PodSpec{ + Volumes: []core.Volume{ + { + Name: "data", + VolumeSource: core.VolumeSource{ + EmptyDir: &core.EmptyDirVolumeSource{ + Medium: core.StorageMediumDefault, + }, + }, + }, + }, + RestartPolicy: core.RestartPolicyNever, + InitContainers: initContainers, + Containers: scanJobContainers, + }, + }, + }, + }, nil +} + +func (s *scanner) getScanReportsFor(job *batch.Job) (reports map[string]sec.VulnerabilityReport, err error) { + reports = make(map[string]sec.VulnerabilityReport) + + for _, c := range job.Spec.Template.Spec.Containers { + klog.V(3).Infof("Getting logs for %s container in job: %s/%s", c.Name, job.Namespace, job.Name) + var logReader io.ReadCloser + logReader, err = 
s.pods.GetPodLogsByJob(job, c.Name) + if err != nil { + return + } + reports[c.Name], err = s.converter.Convert(logReader) + _ = logReader.Close() + if err != nil { + return + } + } + return +} diff --git a/pkg/find/vulnerabilities/writer.go b/pkg/find/vulnerabilities/writer.go new file mode 100644 index 000000000..7e3ddf3ce --- /dev/null +++ b/pkg/find/vulnerabilities/writer.go @@ -0,0 +1,10 @@ +package vulnerabilities + +import ( + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + "github.com/aquasecurity/starboard/pkg/kube" +) + +type Writer interface { + Write(workload kube.Workload, reports map[string]sec.VulnerabilityReport) error +} diff --git a/pkg/kube/cr_manager.go b/pkg/kube/cr_manager.go new file mode 100644 index 000000000..8359f05ea --- /dev/null +++ b/pkg/kube/cr_manager.go @@ -0,0 +1,83 @@ +package kube + +import ( + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + extapi "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +// CRManager defined methods for managing Kubernetes custom resources. +type CRManager interface { + Init() error + Cleanup() error +} + +type crManager struct { + client extapi.ApiextensionsV1beta1Interface +} + +// NewCRManager constructs a CRManager with the given Kubernetes config. +func NewCRManager(client extapi.ApiextensionsV1beta1Interface) (CRManager, error) { + return &crManager{ + client: client, + }, nil +} + +func (m *crManager) Init() (err error) { + err = m.createOrUpdate(&sec.VulnerabilitiesCRD) + if err != nil { + return + } + + err = m.createOrUpdate(&sec.CISKubernetesBenchmarksCRD) + if err != nil { + return + } + + err = m.createOrUpdate(&sec.KubeHunterReportCRD) + if err != nil { + return + } + + err = m.createOrUpdate(&sec.ConfigAuditReportsCRD) + + // TODO We should wait for CRD statuses and make sure that the names were accepted + return +} + +func (m *crManager) createOrUpdate(crd *v1beta1.CustomResourceDefinition) (err error) { + existingCRD, err := m.client.CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) + + switch { + case err == nil: + klog.V(3).Infof("Updating CRD: %s", crd.Name) + deepCopy := existingCRD.DeepCopy() + deepCopy.Spec = crd.Spec + _, err = m.client.CustomResourceDefinitions().Update(deepCopy) + case errors.IsNotFound(err): + klog.V(3).Infof("Creating CRD: %s", crd.Name) + _, err = m.client.CustomResourceDefinitions().Create(crd) + return + } + return +} + +func (m *crManager) Cleanup() (err error) { + err = m.client.CustomResourceDefinitions().Delete(sec.VulnerabilitiesCRName, &metav1.DeleteOptions{}) + if err != nil { + return + } + err = m.client.CustomResourceDefinitions().Delete(sec.CISKubernetesBenchmarksCRName, &metav1.DeleteOptions{}) + if err != nil { + return + } + err = m.client.CustomResourceDefinitions().Delete(sec.KubeHunterReportCRName, &metav1.DeleteOptions{}) + if err != nil { + return + } + err = m.client.CustomResourceDefinitions().Delete(sec.ConfigAuditReportsCRName, &metav1.DeleteOptions{}) + return +} diff --git a/pkg/kube/pod/manager.go b/pkg/kube/pod/manager.go new file mode 100644 index 000000000..5142e9080 --- /dev/null +++ b/pkg/kube/pod/manager.go @@ -0,0 +1,124 @@ +package pod + +import ( + "fmt" + "io" + + "github.com/aquasecurity/starboard/pkg/kube" + + apps "k8s.io/api/apps/v1" + batch "k8s.io/api/batch/v1" + 
batchv1beta1 "k8s.io/api/batch/v1beta1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +type Manager struct { + clientset kubernetes.Interface +} + +func NewPodManager(clientset kubernetes.Interface) *Manager { + return &Manager{ + clientset: clientset, + } +} + +// GetPodSpecByWorkload returns a PodSpec of the specified Workload. +func (pw *Manager) GetPodSpecByWorkload(workload kube.Workload) (spec core.PodSpec, err error) { + ns := workload.Namespace + switch workload.Kind { + case kube.WorkloadKindPod: + var pod *core.Pod + pod, err = pw.GetPodByName(ns, workload.Name) + if err != nil { + return + } + spec = pod.Spec + return + case kube.WorkloadKindReplicaSet: + var rs *apps.ReplicaSet + rs, err = pw.clientset.AppsV1().ReplicaSets(ns).Get(workload.Name, meta.GetOptions{}) + if err != nil { + return + } + spec = rs.Spec.Template.Spec + return + case kube.WorkloadKindReplicationController: + var rc *core.ReplicationController + rc, err = pw.clientset.CoreV1().ReplicationControllers(ns).Get(workload.Name, meta.GetOptions{}) + if err != nil { + return + } + spec = rc.Spec.Template.Spec + return + case kube.WorkloadKindDeployment: + var deploy *apps.Deployment + deploy, err = pw.clientset.AppsV1().Deployments(ns).Get(workload.Name, meta.GetOptions{}) + if err != nil { + return + } + spec = deploy.Spec.Template.Spec + return + case kube.WorkloadKindStatefulSet: + var sts *apps.StatefulSet + sts, err = pw.clientset.AppsV1().StatefulSets(ns).Get(workload.Name, meta.GetOptions{}) + if err != nil { + return + } + spec = sts.Spec.Template.Spec + return + case kube.WorkloadKindDaemonSet: + var ds *apps.DaemonSet + ds, err = pw.clientset.AppsV1().DaemonSets(ns).Get(workload.Name, meta.GetOptions{}) + if err != nil { + return + } + spec = ds.Spec.Template.Spec + return + case kube.WorkloadKindCronJob: + var cj *batchv1beta1.CronJob + cj, err = pw.clientset.BatchV1beta1().CronJobs(ns).Get(workload.Name, meta.GetOptions{}) + if err != nil { + return + } + spec = cj.Spec.JobTemplate.Spec.Template.Spec + return + } + err = fmt.Errorf("unrecognized workload: %s", workload.Kind) + return +} + +func (pw *Manager) GetPodByName(namespace, name string) (*core.Pod, error) { + return pw.clientset.CoreV1().Pods(namespace).Get(name, meta.GetOptions{}) +} + +func (pw *Manager) GetPodLogsByJob(job *batch.Job, container string) (io.ReadCloser, error) { + pod, err := pw.GetPodByJob(job) + if err != nil { + return nil, err + } + + return pw.GetPodLogs(pod, container) +} + +// GetPodByJob gets the Pod controller by the specified Job. 
+func (pw *Manager) GetPodByJob(job *batch.Job) (*core.Pod, error) { + refreshedJob, err := pw.clientset.BatchV1().Jobs(job.Namespace).Get(job.Name, meta.GetOptions{}) + if err != nil { + return nil, err + } + selector := fmt.Sprintf("controller-uid=%s", refreshedJob.Spec.Selector.MatchLabels["controller-uid"]) + podList, err := pw.clientset.CoreV1().Pods(job.Namespace).List(meta.ListOptions{ + LabelSelector: selector}) + if err != nil { + return nil, err + } + return &podList.Items[0], nil +} + +func (pw *Manager) GetPodLogs(pod *core.Pod, container string) (io.ReadCloser, error) { + req := pw.clientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &core.PodLogOptions{ + Follow: true, Container: container}) + return req.Stream() +} diff --git a/pkg/kube/runnable_job.go b/pkg/kube/runnable_job.go new file mode 100644 index 000000000..6135a43bd --- /dev/null +++ b/pkg/kube/runnable_job.go @@ -0,0 +1,82 @@ +package kube + +import ( + "fmt" + "time" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/util/wait" + + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" + + "github.com/aquasecurity/starboard/pkg/runner" + batch "k8s.io/api/batch/v1" + "k8s.io/client-go/kubernetes" +) + +var ( + defaultResyncDuration = 30 * time.Minute +) + +type runnableJob struct { + spec *batch.Job + clientset kubernetes.Interface +} + +// NewRunnableJob constructs a new Runnable task which runs a Kubernetes Job with the given spec and waits for the +// completion or failure. +func NewRunnableJob(clientset kubernetes.Interface, spec *batch.Job) runner.Runnable { + return &runnableJob{ + spec: spec, + clientset: clientset, + } +} + +func (j *runnableJob) Run() (err error) { + informerFactory := informers.NewSharedInformerFactoryWithOptions( + j.clientset, + defaultResyncDuration, + informers.WithNamespace(j.spec.Namespace), + ) + jobInformer := informerFactory.Batch().V1().Jobs() + + klog.V(3).Infof("Creating runnable job: %s/%s", j.spec.Namespace, j.spec.Name) + j.spec, err = j.clientset.BatchV1().Jobs(j.spec.Namespace).Create(j.spec) + if err != nil { + err = fmt.Errorf("creating job: %w", err) + return + } + + complete := make(chan error) + + jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: func(oldObj, newObj interface{}) { + newJob, ok := newObj.(*batch.Job) + if !ok { + return + } + if j.spec.UID != newJob.UID { + return + } + if len(newJob.Status.Conditions) == 0 { + return + } + switch condition := newJob.Status.Conditions[0]; condition.Type { + case batch.JobComplete: + klog.V(3).Infof("Stopping runnable job on task completion with status: %s", batch.JobComplete) + complete <- nil + case batch.JobFailed: + klog.V(3).Infof("Stopping runnable job on task failure with status: %s", batch.JobFailed) + complete <- fmt.Errorf("job failed: %s: %s", condition.Reason, condition.Message) + } + }, + }) + + informerFactory.Start(wait.NeverStop) + informerFactory.WaitForCacheSync(wait.NeverStop) + + err = <-complete + return +} diff --git a/pkg/kube/secret/manager.go b/pkg/kube/secret/manager.go new file mode 100644 index 000000000..6c177cd79 --- /dev/null +++ b/pkg/kube/secret/manager.go @@ -0,0 +1,82 @@ +package secret + +import ( + "strings" + + "github.com/aquasecurity/starboard/pkg/docker" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +type Manager struct { + clientset kubernetes.Interface +} + +// NewSecretManager constructs new Manager with the specified Kubernetes Clientset. 
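+// An illustrative usage sketch (clientset, namespace and podSpec are assumed
+// names): the manager maps container images referenced by a PodSpec to the
+// Docker credentials found in its imagePullSecrets, keyed by image reference,
+// whenever the image's registry matches one of the configured servers:
+//
+//	sm := secret.NewSecretManager(clientset)
+//	creds, err := sm.GetImagesWithCredentials(namespace, podSpec)
+//	// creds["quay.io/org/app:1.0"] -> docker.ServerCredentials{Username: "...", Password: "..."}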
+func NewSecretManager(clientset kubernetes.Interface) *Manager { + return &Manager{ + clientset: clientset, + } +} + +// GetImagesWithCredentials gets private images for the specified PodSpec and maps them to the Docker ServerCredentials. +func (s *Manager) GetImagesWithCredentials(namespace string, spec core.PodSpec) (credentials map[string]docker.ServerCredentials, err error) { + images := s.GetImages(spec) + + serverCredentials, err := s.GetServersWithCredentials(namespace, spec.ImagePullSecrets) + if err != nil { + return + } + + credentials = make(map[string]docker.ServerCredentials) + for _, image := range images { + server := s.GetServerFromImage(image) + if ce, ok := serverCredentials[server]; ok { + credentials[image] = ce + } + } + + return +} + +// GetImages gets a slice of images for the specified PodSpec. +func (s *Manager) GetImages(spec core.PodSpec) (images []string) { + for _, c := range spec.InitContainers { + images = append(images, c.Image) + } + + for _, c := range spec.Containers { + images = append(images, c.Image) + } + + return +} + +func (s *Manager) GetServersWithCredentials(namespace string, imagePullSecrets []core.LocalObjectReference) (credentials map[string]docker.ServerCredentials, err error) { + credentials = make(map[string]docker.ServerCredentials) + + for _, secret := range imagePullSecrets { + secret, err := s.clientset.CoreV1(). + Secrets(namespace). + Get(secret.Name, meta.GetOptions{}) + + if err != nil { + return nil, err + } + dockerCfg, err := docker.ReadCredentialsFromBytes(secret.Data[".dockerconfigjson"]) + for server, configEntry := range dockerCfg { + credentials[server] = configEntry + } + } + + return +} + +func (s *Manager) GetServerFromImage(image string) string { + chunks := strings.Split(image, "/") + if len(chunks) > 0 { + return chunks[0] + } + return "" +} diff --git a/pkg/kube/workload.go b/pkg/kube/workload.go new file mode 100644 index 000000000..6100c542e --- /dev/null +++ b/pkg/kube/workload.go @@ -0,0 +1,77 @@ +package kube + +import ( + "fmt" +) + +const ( + LabelWorkloadKind = "starboard.workload.kind" + LabelWorkloadName = "starboard.workload.name" + LabelContainerName = "starboard.container.name" +) + +// WorkloadKind is an enum defining the different kinds of Kubernetes workloads. 
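+// WorkloadKindFromString parses the short resource names accepted on the
+// command line, and String renders the canonical Kubernetes kind, for example
+// (illustrative):
+//
+//	kind, err := WorkloadKindFromString("deploy") // WorkloadKindDeployment, nil
+//	fmt.Println(kind)                             // prints "Deployment"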
+type WorkloadKind int + +const ( + WorkloadKindUnknown WorkloadKind = iota + WorkloadKindPod + WorkloadKindReplicaSet + WorkloadKindReplicationController + WorkloadKindDeployment + WorkloadKindStatefulSet + WorkloadKindDaemonSet + WorkloadKindCronJob + WorkloadKindJob +) + +var workloadKindToString = map[WorkloadKind]string{ + WorkloadKindUnknown: "Unknown", + WorkloadKindPod: "Pod", + WorkloadKindReplicaSet: "ReplicaSet", + WorkloadKindReplicationController: "ReplicationController", + WorkloadKindDeployment: "Deployment", + WorkloadKindStatefulSet: "StatefulSet", + WorkloadKindDaemonSet: "DaemonSet", + WorkloadKindCronJob: "CronJob", + WorkloadKindJob: "Job", +} + +func (t WorkloadKind) String() string { + if s, exists := workloadKindToString[t]; exists { + return s + } + return "Unknown" +} + +type Workload struct { + Namespace string + Kind WorkloadKind + Name string +} + +func (t Workload) String() string { + return fmt.Sprintf("%s/%s", t.Kind, t.Name) +} + +func WorkloadKindFromString(s string) (WorkloadKind, error) { + switch s { + case "pods", "pod", "po": + return WorkloadKindPod, nil + case "replicasets.apps", "replicasets", "replicaset", "rs": + return WorkloadKindReplicaSet, nil + case "replicationcontrollers", "replicationcontroller", "rc": + return WorkloadKindReplicationController, nil + case "deployments.apps", "deployments", "deployment", "deploy": + return WorkloadKindDeployment, nil + case "statefulsets.apps", "statefulsets", "statefulset", "sts": + return WorkloadKindStatefulSet, nil + case "daemonsets.apps", "daemonsets", "daemonset", "ds": + return WorkloadKindDaemonSet, nil + case "cronjobs.batch", "cronjob.batch", "cronjobs", "cronjob", "cj": + return WorkloadKindCronJob, nil + case "jobs.batch", "job.batch", "jobs", "job": + return WorkloadKindJob, nil + } + return WorkloadKindUnknown, fmt.Errorf("unrecognized workload: %s", s) +} diff --git a/pkg/kubebench/crd/writer.go b/pkg/kubebench/crd/writer.go new file mode 100644 index 000000000..814a45f4f --- /dev/null +++ b/pkg/kubebench/crd/writer.go @@ -0,0 +1,44 @@ +package crd + +import ( + "errors" + "strings" + + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + secapi "github.com/aquasecurity/starboard/pkg/generated/clientset/versioned" + "github.com/aquasecurity/starboard/pkg/kubebench" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" +) + +type writer struct { + client *secapi.Clientset +} + +func NewWriter(config *rest.Config) (w kubebench.Writer, err error) { + client, err := secapi.NewForConfig(config) + if err != nil { + return + } + w = &writer{ + client: client, + } + return +} + +func (w *writer) Write(report sec.CISKubernetesBenchmarkReport, node string) (err error) { + if strings.TrimSpace(node) == "" { + err = errors.New("node name must not be blank") + return + } + // TODO Check if an instance of the report with the given name already exists. 
+ // TODO If exists just update it, create new instance otherwise + _, err = w.client.AquasecurityV1alpha1().CISKubernetesBenchmarks().Create(&sec.CISKubernetesBenchmark{ + ObjectMeta: meta.ObjectMeta{ + Name: node, + Labels: map[string]string{}, + }, + Report: report, + }) + return +} diff --git a/pkg/kubebench/model.go b/pkg/kubebench/model.go new file mode 100644 index 000000000..2ab8d172b --- /dev/null +++ b/pkg/kubebench/model.go @@ -0,0 +1,38 @@ +package kubebench + +import ( + "encoding/json" + "io" + "time" + + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" +) + +func CISBenchmarkReportFrom(reader io.Reader) (report sec.CISKubernetesBenchmarkReport, err error) { + decoder := json.NewDecoder(reader) + report = sec.CISKubernetesBenchmarkReport{ + GeneratedAt: meta.NewTime(time.Now()), + Scanner: sec.Scanner{ + Name: "kube-bench", + Vendor: "Aqua Security", + Version: "latest", + }, + Sections: []sec.CISKubernetesBenchmarkSection{}, + } + + for { + var section sec.CISKubernetesBenchmarkSection + de := decoder.Decode(§ion) + if de == io.EOF { + break + } + if de != nil { + err = de + break + } + report.Sections = append(report.Sections, section) + } + return +} diff --git a/pkg/kubebench/scanner.go b/pkg/kubebench/scanner.go new file mode 100644 index 000000000..a7e34b443 --- /dev/null +++ b/pkg/kubebench/scanner.go @@ -0,0 +1,207 @@ +package kubebench + +import ( + "fmt" + + "k8s.io/klog" + + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + + "github.com/aquasecurity/starboard/pkg/kube" + "github.com/aquasecurity/starboard/pkg/kube/pod" + "github.com/aquasecurity/starboard/pkg/runner" + "github.com/google/uuid" + batch "k8s.io/api/batch/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" + + "time" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +const ( + kubeBenchContainerName = "kube-bench" + kubeBenchContainerImage = "aquasec/kube-bench:latest" +) + +var ( + runnerTimeout = 60 * time.Second +) + +type Scanner struct { + clientset kubernetes.Interface + pods *pod.Manager +} + +func NewScanner(config *rest.Config) (*Scanner, error) { + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + return &Scanner{ + clientset: clientset, + pods: pod.NewPodManager(clientset), + }, nil +} + +func (s *Scanner) Scan() (report sec.CISKubernetesBenchmarkReport, node string, err error) { + // 1. Prepare descriptor for the Kubernetes Job which will run kube-bench + kubeBenchJob := s.prepareKubeBenchJob() + + // 2. Run the prepared Job and wait for its completion or failure + err = runner.New(runnerTimeout). + Run(kube.NewRunnableJob(s.clientset, kubeBenchJob)) + if err != nil { + err = fmt.Errorf("running kube-bench job: %w", err) + return + } + + defer func() { + // 6. Delete the kube-bench Job + klog.V(3).Infof("Deleting job: %s/%s", kubeBenchJob.Namespace, kubeBenchJob.Name) + background := meta.DeletePropagationBackground + _ = s.clientset.BatchV1().Jobs(kubeBenchJob.Namespace).Delete(kubeBenchJob.Name, &meta.DeleteOptions{ + PropagationPolicy: &background, + }) + }() + + // 3. Get the Pod controlled by the kube-bench Job + kubeBenchPod, err := s.pods.GetPodByJob(kubeBenchJob) + if err != nil { + err = fmt.Errorf("getting kube-bench pod: %w", err) + return + } + + node = kubeBenchPod.Spec.NodeName + + // 4. 
Get kube-bench JSON output from the kube-bench Pod + klog.V(3).Infof("Getting logs for %s container in job: %s/%s", kubeBenchContainerName, + kubeBenchJob.Namespace, kubeBenchJob.Name) + logsReader, err := s.pods.GetPodLogs(kubeBenchPod, kubeBenchContainerName) + if err != nil { + err = fmt.Errorf("getting logs: %w", err) + return + } + defer func() { + _ = logsReader.Close() + }() + + // 5. Parse the CISBenchmarkReport from the logs Reader + report, err = CISBenchmarkReportFrom(logsReader) + if err != nil { + err = fmt.Errorf("parsing CIS benchmark report: %w", err) + return + } + + return +} + +func (s *Scanner) prepareKubeBenchJob() *batch.Job { + return &batch.Job{ + ObjectMeta: meta.ObjectMeta{ + Name: uuid.New().String(), + // TODO Create the starboard namespace in the init command? + Namespace: core.NamespaceDefault, + Labels: map[string]string{ + "app": "kube-bench", + }, + }, + Spec: batch.JobSpec{ + BackoffLimit: pointer.Int32Ptr(1), + Completions: pointer.Int32Ptr(1), + ActiveDeadlineSeconds: pointer.Int64Ptr(int64(runnerTimeout.Seconds())), + Template: core.PodTemplateSpec{ + ObjectMeta: meta.ObjectMeta{ + Labels: map[string]string{ + "app": "kube-bench", + }, + }, + Spec: core.PodSpec{ + RestartPolicy: core.RestartPolicyNever, + HostPID: true, + Volumes: []core.Volume{ + { + Name: "var-lib-etcd", + VolumeSource: core.VolumeSource{ + HostPath: &core.HostPathVolumeSource{ + Path: "/var/lib/etcd", + }, + }, + }, + { + Name: "var-lib-kubelet", + VolumeSource: core.VolumeSource{ + HostPath: &core.HostPathVolumeSource{ + Path: "/var/lib/kubelet", + }, + }, + }, + { + Name: "etc-systemd", + VolumeSource: core.VolumeSource{ + HostPath: &core.HostPathVolumeSource{ + Path: "/etc/systemd", + }, + }, + }, + { + Name: "etc-kubernetes", + VolumeSource: core.VolumeSource{ + HostPath: &core.HostPathVolumeSource{ + Path: "/etc/kubernetes", + }, + }, + }, + { + Name: "usr-bin", + VolumeSource: core.VolumeSource{ + HostPath: &core.HostPathVolumeSource{ + Path: "/usr/bin", + }, + }, + }, + }, + Containers: []core.Container{ + { + Name: kubeBenchContainerName, + Image: kubeBenchContainerImage, + ImagePullPolicy: core.PullAlways, + Command: []string{"kube-bench"}, + Args: []string{"--json"}, + VolumeMounts: []core.VolumeMount{ + { + Name: "var-lib-etcd", + MountPath: "/var/lib/etcd", + ReadOnly: true, + }, + { + Name: "var-lib-kubelet", + MountPath: "/var/lib/kubelet", + ReadOnly: true, + }, + { + Name: "etc-systemd", + MountPath: "/etc/systemd", + ReadOnly: true, + }, + { + Name: "etc-kubernetes", + MountPath: "/etc/kubernetes", + ReadOnly: true, + }, + { + Name: "usr-bin", + MountPath: "/usr/local/mount-from-host/bin", + ReadOnly: true, + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/pkg/kubebench/writer.go b/pkg/kubebench/writer.go new file mode 100644 index 000000000..36f67afa3 --- /dev/null +++ b/pkg/kubebench/writer.go @@ -0,0 +1,9 @@ +package kubebench + +import ( + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" +) + +type Writer interface { + Write(report sec.CISKubernetesBenchmarkReport, node string) error +} diff --git a/pkg/kubehunter/crd/writer.go b/pkg/kubehunter/crd/writer.go new file mode 100644 index 000000000..d20f51b4d --- /dev/null +++ b/pkg/kubehunter/crd/writer.go @@ -0,0 +1,45 @@ +package crd + +import ( + "errors" + "strings" + + "github.com/aquasecurity/starboard/pkg/kubehunter" + + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + secapi "github.com/aquasecurity/starboard/pkg/generated/clientset/versioned" + meta 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" +) + +type writer struct { + client *secapi.Clientset +} + +func NewWriter(config *rest.Config) (w kubehunter.Writer, err error) { + client, err := secapi.NewForConfig(config) + if err != nil { + return + } + w = &writer{ + client: client, + } + return +} + +func (w *writer) Write(report sec.KubeHunterOutput, cluster string) (err error) { + if strings.TrimSpace(cluster) == "" { + err = errors.New("cluster name must not be blank") + return + } + // TODO Check if an instance of the report with the given name already exists. + // TODO If exists just update it, create new instance otherwise + _, err = w.client.AquasecurityV1alpha1().KubeHunterReports().Create(&sec.KubeHunterReport{ + ObjectMeta: meta.ObjectMeta{ + Name: cluster, + Labels: map[string]string{}, + }, + Report: report, + }) + return +} diff --git a/pkg/kubehunter/model.go b/pkg/kubehunter/model.go new file mode 100644 index 000000000..0fa07c3fe --- /dev/null +++ b/pkg/kubehunter/model.go @@ -0,0 +1,21 @@ +package kubehunter + +import ( + "encoding/json" + "io" + "time" + + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func OutputFrom(reader io.Reader) (report sec.KubeHunterOutput, err error) { + report.GeneratedAt = meta.NewTime(time.Now()) + report.Scanner = sec.Scanner{ + Name: "kube-hunter", + Vendor: "Aqua Security", + Version: "latest", + } + err = json.NewDecoder(reader).Decode(&report) + return +} diff --git a/pkg/kubehunter/scanner.go b/pkg/kubehunter/scanner.go new file mode 100644 index 000000000..6c9cc9ecf --- /dev/null +++ b/pkg/kubehunter/scanner.go @@ -0,0 +1,128 @@ +package kubehunter + +import ( + "fmt" + "time" + + "k8s.io/klog" + + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + + "github.com/aquasecurity/starboard/pkg/kube" + "github.com/aquasecurity/starboard/pkg/kube/pod" + "github.com/aquasecurity/starboard/pkg/runner" + "github.com/google/uuid" + batch "k8s.io/api/batch/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/utils/pointer" +) + +const ( + kubeHunterContainerName = "kube-hunter" + kubeHunterContainerImage = "aquasec/kube-hunter:latest" +) + +var ( + runnerTimeout = 90 * time.Second + jobTimeout = 60 * time.Second +) + +type Scanner struct { + clientset kubernetes.Interface + pods *pod.Manager +} + +func NewScanner(config *rest.Config) (*Scanner, error) { + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + return &Scanner{ + clientset: clientset, + pods: pod.NewPodManager(clientset), + }, nil +} + +func (s *Scanner) Scan() (report sec.KubeHunterOutput, err error) { + // 1. Prepare descriptor for the Kubernetes Job which will run kube-hunter + kubeHunterJob := s.prepareKubeHunterJob() + + // 2. Run the prepared Job and wait for its completion or failure + err = runner.New(runnerTimeout). + Run(kube.NewRunnableJob(s.clientset, kubeHunterJob)) + if err != nil { + err = fmt.Errorf("running kube-hunter job: %w", err) + return + } + + defer func() { + // 5. Delete the kube-hunter Job + klog.V(3).Infof("Deleting job: %s/%s", kubeHunterJob.Namespace, kubeHunterJob.Name) + background := meta.DeletePropagationBackground + _ = s.clientset.BatchV1().Jobs(kubeHunterJob.Namespace).Delete(kubeHunterJob.Name, &meta.DeleteOptions{ + PropagationPolicy: &background, + }) + }() + + // 3. 
Get kube-hunter JSON output from the kube-hunter Pod + klog.V(3).Infof("Getting logs for %s container in job: %s/%s", kubeHunterContainerName, + kubeHunterJob.Namespace, kubeHunterJob.Name) + logsReader, err := s.pods.GetPodLogsByJob(kubeHunterJob, kubeHunterContainerName) + if err != nil { + err = fmt.Errorf("getting logs: %w", err) + return + } + defer func() { + _ = logsReader.Close() + }() + + // 4. Parse the KubeHuberOutput from the logs Reader + report, err = OutputFrom(logsReader) + if err != nil { + err = fmt.Errorf("parsing kube hunter report: %w", err) + return + } + + return +} + +func (s *Scanner) prepareKubeHunterJob() *batch.Job { + return &batch.Job{ + ObjectMeta: meta.ObjectMeta{ + Name: uuid.New().String(), + // TODO Create the starboard namespace in the init command? + Namespace: core.NamespaceDefault, + Labels: map[string]string{ + "app": "kube-hunter", + }, + }, + Spec: batch.JobSpec{ + BackoffLimit: pointer.Int32Ptr(1), + Completions: pointer.Int32Ptr(1), + ActiveDeadlineSeconds: pointer.Int64Ptr(int64(jobTimeout.Seconds())), + Template: core.PodTemplateSpec{ + ObjectMeta: meta.ObjectMeta{ + Labels: map[string]string{ + "app": "kube-hunter", + }, + }, + Spec: core.PodSpec{ + RestartPolicy: core.RestartPolicyNever, + HostPID: true, + Containers: []core.Container{ + { + Name: kubeHunterContainerName, + Image: kubeHunterContainerImage, + ImagePullPolicy: core.PullAlways, + Command: []string{"python", "kube-hunter.py"}, + Args: []string{"--pod", "--report", "json", "--log", "warn"}, + }, + }, + }, + }, + }, + } +} diff --git a/pkg/kubehunter/writer.go b/pkg/kubehunter/writer.go new file mode 100644 index 000000000..e85adae77 --- /dev/null +++ b/pkg/kubehunter/writer.go @@ -0,0 +1,9 @@ +package kubehunter + +import ( + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" +) + +type Writer interface { + Write(report sec.KubeHunterOutput, cluster string) error +} diff --git a/pkg/polaris/converter.go b/pkg/polaris/converter.go new file mode 100644 index 000000000..cbbbec2bf --- /dev/null +++ b/pkg/polaris/converter.go @@ -0,0 +1,93 @@ +package polaris + +import ( + "encoding/json" + "io" + + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + "github.com/aquasecurity/starboard/pkg/ext" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Converter interface { + Convert(reader io.Reader) ([]sec.ConfigAudit, error) +} + +type converter struct { + clock ext.Clock +} + +var DefaultConverter = NewConverter(ext.NewSystemClock()) + +func NewConverter(clock ext.Clock) Converter { + return &converter{ + clock: clock, + } +} + +func (c *converter) Convert(reader io.Reader) (reports []sec.ConfigAudit, err error) { + var report Report + err = json.NewDecoder(reader).Decode(&report) + if err != nil { + return + } + reports = c.convert(report) + return +} + +func (c *converter) convert(report Report) (reports []sec.ConfigAudit) { + reports = make([]sec.ConfigAudit, len(report.Results)) + for i, result := range report.Results { + reports[i] = c.toConfigAudit(result) + } + return +} + +func (c *converter) toConfigAudit(result Result) (report sec.ConfigAudit) { + var podChecks []sec.Check + containerChecks := make(map[string][]sec.Check) + + for _, pr := range result.PodResult.Results { + podChecks = append(podChecks, sec.Check{ + ID: pr.ID, + Message: pr.Message, + Success: pr.Success, + Severity: pr.Severity, + Category: pr.Category, + }) + } + + for _, cr := range result.PodResult.ContainerResults { + var checks []sec.Check + for _, crr := range 
cr.Results { + checks = append(checks, sec.Check{ + ID: crr.ID, + Message: crr.Message, + Success: crr.Success, + Severity: crr.Severity, + Category: crr.Category, + }) + + } + containerChecks[cr.Name] = checks + } + + report = sec.ConfigAudit{ + GeneratedAt: metav1.NewTime(c.clock.Now()), + Scanner: sec.Scanner{ + Name: "Polaris", + Vendor: "Fairwinds", + Version: "latest", + }, + Resource: sec.KubernetesNamespacedResource{ + Namespace: result.Namespace, + KubernetesResource: sec.KubernetesResource{ + Kind: result.Kind, + Name: result.Name, + }, + }, + PodChecks: podChecks, + ContainerChecks: containerChecks, + } + return +} diff --git a/pkg/polaris/converter_test.go b/pkg/polaris/converter_test.go new file mode 100644 index 000000000..43acba5bf --- /dev/null +++ b/pkg/polaris/converter_test.go @@ -0,0 +1,77 @@ +package polaris + +import ( + "os" + "testing" + "time" + + "github.com/aquasecurity/starboard/pkg/ext" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConverter_Convert(t *testing.T) { + file, err := os.Open("test_fixture/polaris-report.json") + require.NoError(t, err) + defer func() { + _ = file.Close() + }() + now := time.Now() + + reports, err := NewConverter(ext.NewFixedClock(now)).Convert(file) + require.NoError(t, err) + assert.Equal(t, []v1alpha1.ConfigAudit{ + { + GeneratedAt: meta.NewTime(now), + Scanner: v1alpha1.Scanner{ + Name: "Polaris", + Vendor: "Fairwinds", + Version: "latest", + }, + Resource: v1alpha1.KubernetesNamespacedResource{ + Namespace: "aqua", + KubernetesResource: v1alpha1.KubernetesResource{ + Kind: "Deployment", + Name: "csp-database", + }, + }, + PodChecks: []v1alpha1.Check{ + { + ID: "hostIPCSet", + Message: "Host IPC is not configured", + Success: true, + Severity: "error", + Category: "Security", + }, + { + ID: "hostNetworkSet", + Message: "Host network is not configured", + Success: true, + Severity: "warning", + Category: "Networking", + }, + }, + ContainerChecks: map[string][]v1alpha1.Check{ + "db": { + { + ID: "cpuLimitsMissing", + Message: "CPU limits are set", + Success: true, + Severity: "warning", + Category: "Resources", + }, + { + ID: "cpuRequestsMissing", + Message: "CPU requests are set", + Success: true, + Severity: "warning", + Category: "Resources", + }, + }, + }, + }, + }, reports) +} diff --git a/pkg/polaris/crd/writer.go b/pkg/polaris/crd/writer.go new file mode 100644 index 000000000..b2d819aae --- /dev/null +++ b/pkg/polaris/crd/writer.go @@ -0,0 +1,61 @@ +package crd + +import ( + "fmt" + "strings" + + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + clientset "github.com/aquasecurity/starboard/pkg/generated/clientset/versioned" + "github.com/aquasecurity/starboard/pkg/kube" + "github.com/aquasecurity/starboard/pkg/polaris" + "k8s.io/apimachinery/pkg/api/errors" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +type writer struct { + client clientset.Interface +} + +func NewWriter(client clientset.Interface) polaris.Writer { + return &writer{ + client: client, + } +} + +func (w *writer) Write(report sec.ConfigAudit) (err error) { + namespace := report.Resource.Namespace + name := fmt.Sprintf("%s.%s", strings.ToLower(report.Resource.Kind), report.Resource.Name) + + existingCR, err := w.client.AquasecurityV1alpha1().ConfigAuditReports(namespace).Get(name, meta.GetOptions{}) + + switch { + case err == nil: + 
klog.V(3).Infof("Updating config audit report: %s/%s", namespace, name) + deepCopy := existingCR.DeepCopy() + deepCopy.Report = report + _, err = w.client.AquasecurityV1alpha1().ConfigAuditReports(namespace).Update(deepCopy) + case errors.IsNotFound(err): + klog.V(3).Infof("Creating config audit report: %s/%s", namespace, name) + _, err = w.client.AquasecurityV1alpha1().ConfigAuditReports(namespace). + Create(&sec.ConfigAuditReport{ + ObjectMeta: meta.ObjectMeta{ + Name: name, + Labels: map[string]string{ + kube.LabelWorkloadKind: report.Resource.Kind, + kube.LabelWorkloadName: report.Resource.Name, + }, + }, + Report: report, + }) + return + } + return +} + +func (w *writer) WriteAll(reports []sec.ConfigAudit) (err error) { + for _, report := range reports { + err = w.Write(report) + } + return +} diff --git a/pkg/polaris/model.go b/pkg/polaris/model.go new file mode 100644 index 000000000..f752212cb --- /dev/null +++ b/pkg/polaris/model.go @@ -0,0 +1,42 @@ +package polaris + +type Report struct { + PolarisOutputVersion string `json:"PolarisOutputVersion"` + SourceType string `json:"SourceType"` + ClusterInfo *ClusterInfo `json:"ClusterInfo"` + Results []Result `json:"Results"` +} + +type ClusterInfo struct { + Version string `json:"Version"` + Nodes int `json:"Nodes"` + Pods int `json:"Pods"` + Namespaces int `json:"Namespaces"` + Controllers int `json:"Controllers"` +} + +type Result struct { + Name string `json:"Name"` + Namespace string `json:"Namespace"` + Kind string `json:"Kind"` + PodResult PodResult `json:"PodResult"` +} + +type PodResult struct { + Name string `json:"Name"` + Results map[string]Check `json:"Results"` + ContainerResults []ContainerResult `json:"ContainerResults"` +} + +type ContainerResult struct { + Name string `json:"Name"` + Results map[string]Check `json:"Results"` +} + +type Check struct { + ID string `json:"ID"` + Message string `json:"Message"` + Success bool `json:"Success"` + Severity string `json:"Severity"` + Category string `json:"Category"` +} diff --git a/pkg/polaris/scanner.go b/pkg/polaris/scanner.go new file mode 100644 index 000000000..d84e2dfc0 --- /dev/null +++ b/pkg/polaris/scanner.go @@ -0,0 +1,142 @@ +package polaris + +import ( + "fmt" + "time" + + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" + + "k8s.io/utils/pointer" + + "github.com/aquasecurity/starboard/pkg/kube" + "github.com/aquasecurity/starboard/pkg/runner" + "k8s.io/klog" + + "github.com/aquasecurity/starboard/pkg/kube/pod" + "github.com/google/uuid" + batch "k8s.io/api/batch/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +const ( + runnerTimeout = 90 * time.Second + jobTimeout = 60 * time.Second +) + +const ( + polarisContainerName = "polaris" + // TODO: The latest semver tagged image 0.6.0 doesn't return audit checks ?! 
+ polarisContainerImage = "quay.io/fairwinds/polaris:cfc0d213cd603793d8e36eecfb0def1579a34997" + polarisConfigVolume = "config-volume" + polarisConfigMap = "polaris-config" +) + +type Scanner struct { + clientset kubernetes.Interface + pods *pod.Manager + converter Converter +} + +func NewScanner(config *rest.Config) (*Scanner, error) { + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + return &Scanner{ + clientset: clientset, + pods: pod.NewPodManager(clientset), + converter: DefaultConverter, + }, nil +} + +func (s *Scanner) Scan() (reports []sec.ConfigAudit, err error) { + polarisJob := s.preparePolarisJob() + + err = runner.New(runnerTimeout). + Run(kube.NewRunnableJob(s.clientset, polarisJob)) + if err != nil { + err = fmt.Errorf("running polaris job: %w", err) + return + } + + defer func() { + klog.V(3).Infof("Deleting job: %s/%s", polarisJob.Namespace, polarisJob.Name) + background := meta.DeletePropagationBackground + _ = s.clientset.BatchV1().Jobs(polarisJob.Namespace).Delete(polarisJob.Name, &meta.DeleteOptions{ + PropagationPolicy: &background, + }) + }() + + klog.V(3).Infof("Getting logs for %s container in job: %s/%s", polarisContainerName, + polarisJob.Namespace, polarisJob.Name) + logsReader, err := s.pods.GetPodLogsByJob(polarisJob, polarisContainerName) + if err != nil { + err = fmt.Errorf("getting logs: %w", err) + return + } + + reports, err = s.converter.Convert(logsReader) + defer func() { + _ = logsReader.Close() + }() + return +} + +func (s *Scanner) preparePolarisJob() *batch.Job { + return &batch.Job{ + ObjectMeta: meta.ObjectMeta{ + Name: uuid.New().String(), + // TODO Create the starboard namespace in the init command? + Namespace: "starboard", + Labels: map[string]string{ + "app": "polaris", + }, + }, + Spec: batch.JobSpec{ + BackoffLimit: pointer.Int32Ptr(1), + Completions: pointer.Int32Ptr(1), + ActiveDeadlineSeconds: pointer.Int64Ptr(int64(jobTimeout.Seconds())), + Template: core.PodTemplateSpec{ + ObjectMeta: meta.ObjectMeta{ + Labels: map[string]string{ + "app": "polaris", + }, + }, + Spec: core.PodSpec{ + ServiceAccountName: "starboard", + RestartPolicy: core.RestartPolicyNever, + Volumes: []core.Volume{ + { + Name: polarisConfigVolume, + VolumeSource: core.VolumeSource{ + ConfigMap: &core.ConfigMapVolumeSource{ + LocalObjectReference: core.LocalObjectReference{ + Name: polarisConfigMap, + }, + }, + }, + }, + }, + Containers: []core.Container{ + { + Name: polarisContainerName, + Image: polarisContainerImage, + ImagePullPolicy: core.PullIfNotPresent, + VolumeMounts: []core.VolumeMount{ + { + Name: polarisConfigVolume, + MountPath: "/examples", + }, + }, + Command: []string{"polaris"}, + Args: []string{"audit", "--log-level", "error"}, + }, + }, + }, + }, + }, + } +} diff --git a/pkg/polaris/test_fixture/polaris-report.json b/pkg/polaris/test_fixture/polaris-report.json new file mode 100644 index 000000000..8d88a1bf5 --- /dev/null +++ b/pkg/polaris/test_fixture/polaris-report.json @@ -0,0 +1,63 @@ +{ + "PolarisOutputVersion": "1.0", + "AuditTime": "2020-04-24T12:02:39Z", + "SourceType": "Cluster", + "SourceName": "https://10.96.0.1:443", + "DisplayName": "https://10.96.0.1:443", + "ClusterInfo": { + "Version": "1.18", + "Nodes": 1, + "Pods": 20, + "Namespaces": 7, + "Controllers": 20 + }, + "Results": [ + { + "Name": "csp-database", + "Namespace": "aqua", + "Kind": "Deployment", + "Results": {}, + "PodResult": { + "Name": "", + "Results": { + "hostIPCSet": { + "ID": "hostIPCSet", + "Message": "Host IPC is not 
configured", + "Success": true, + "Severity": "error", + "Category": "Security" + }, + "hostNetworkSet": { + "ID": "hostNetworkSet", + "Message": "Host network is not configured", + "Success": true, + "Severity": "warning", + "Category": "Networking" + } + }, + "ContainerResults": [ + { + "Name": "db", + "Results": { + "cpuLimitsMissing": { + "ID": "cpuLimitsMissing", + "Message": "CPU limits are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + }, + "cpuRequestsMissing": { + "ID": "cpuRequestsMissing", + "Message": "CPU requests are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + } + } + } + ] + }, + "CreatedTime": "0001-01-01T00:00:00Z" + } + ] +} diff --git a/pkg/polaris/writer.go b/pkg/polaris/writer.go new file mode 100644 index 000000000..d42a590df --- /dev/null +++ b/pkg/polaris/writer.go @@ -0,0 +1,15 @@ +package polaris + +import ( + sec "github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1" +) + +// Write is the interface that wraps basic methods for persisting ConfigAudit reports. +// +// Write persists the given ConfigAudit report. +// +// WriteAll persists the given slice of ConfigAudit reports. +type Writer interface { + Write(report sec.ConfigAudit) (err error) + WriteAll(reports []sec.ConfigAudit) (err error) +} diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go new file mode 100644 index 000000000..bd99c377d --- /dev/null +++ b/pkg/runner/runner.go @@ -0,0 +1,67 @@ +package runner + +import ( + "errors" + "time" + + "k8s.io/klog" +) + +// ErrTimeout is returned when Runner's Run method fails due to a timeout event. +var ErrTimeout = errors.New("runner received timeout") + +// Runnable is the interface that wraps the basic Run method. +// +// Run should be implemented by any task intended to be executed by the Runner. +type Runnable interface { + Run() error +} + +// The RunnableFunc type is an adapter to allow the use of ordinary functions as Runnable tasks. +// If f is a function with the appropriate signature, RunnableFunc(f) is a Runnable that calls f. +type RunnableFunc func() error + +// Run calls f() +func (f RunnableFunc) Run() error { + return f() +} + +// Runner is the interface that wraps the basic Run method. +// +// Run executes submitted Runnable tasks. +type Runner interface { + Run(task Runnable) error +} + +type runner struct { + // complete channel reports that processing is done + complete chan error + // timeout channel reports that time has run out + timeout <-chan time.Time +} + +// New constructs a new ready-to-use Runner with the specified timeout for running a Task. +func New(d time.Duration) Runner { + return &runner{ + complete: make(chan error), + timeout: time.After(d), + } +} + +// Run runs the specified task and monitors channel events. +func (r *runner) Run(task Runnable) error { + go func() { + r.complete <- task.Run() + }() + + select { + // Signaled when processing is done. + case err := <-r.complete: + klog.V(3).Infof("Stopping runner on task completion with error: %v", err) + return err + // Signaled when we run out of time. + case <-r.timeout: + klog.V(3).Info("Stopping runner on timeout") + return ErrTimeout + } +}