Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Development and Deployment to Minikube #48

Merged
merged 1 commit into from
Jan 7, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
91 changes: 82 additions & 9 deletions build/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,11 @@ VERSION ?= $(base_version)-$(shell git rev-parse --short HEAD)
# The registry that is being used to store docker images
REGISTRY ?= gcr.io/agon-images
# Where the kubectl configuration files are being stored
KUBECONFIG ?= $(build_path)/.kube
KUBEPATH ?= ~/.kube
# The (gcloud) test cluster that is being worked against
CLUSTER_NAME ?= test-cluster
# the profile to use when developing on minikube
MINIKUBE_PROFILE ?= agon

# Directory that this Makefile is in.
mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST)))
Expand All @@ -48,7 +50,7 @@ agon_path := $(realpath $(build_path)/..)
agon_package = github.com/agonio/agon
mount_path = /go/src/$(agon_package)
common_mounts = -v $(build_path)/.config/gcloud:/root/.config/gcloud \
-v $(KUBECONFIG):/root/.kube \
-v $(KUBEPATH):/root/.kube \
-v $(agon_path):$(mount_path)

# Use a hash of the Dockerfile for the tag, so when the Dockerfile changes,
Expand Down Expand Up @@ -84,11 +86,15 @@ test: ensure-build-image
# Push all the images up to $(REGISTRY)
push: push-gameservers-controller-image push-gameservers-sidecar-image

# install the development version of Agon
# Installs the current development version of Agon into the Kubernetes cluster
install: ALWAYS_PULL_SIDECAR := true
install: IMAGE_PULL_POLICY := "Always"
install:
cp $(build_path)/install.yaml $(build_path)/.install.yaml
sed -i -e 's!$${REGISTRY}!$(REGISTRY)!g' -e 's!$${VERSION}!$(VERSION)!g' $(build_path)/.install.yaml
docker run --rm $(common_mounts) --entrypoint=kubectl $(build_tag) apply -f $(mount_path)/build/.install.yaml
sed -i -e 's!$${REGISTRY}!$(REGISTRY)!g' -e 's!$${VERSION}!$(VERSION)!g' \
-e 's!$${IMAGE_PULL_POLICY}!$(IMAGE_PULL_POLICY)!g' -e 's!$${ALWAYS_PULL_SIDECAR}!$(ALWAYS_PULL_SIDECAR)!g' \
$(build_path)/.install.yaml
docker run --rm $(common_mounts) $(ARGS) $(build_tag) kubectl apply -f $(mount_path)/build/.install.yaml

# Build a static binary for the gameserver controller
build-gameservers-controller-binary: ensure-build-image
Expand Down Expand Up @@ -182,6 +188,13 @@ push-build-image:
docker tag $(build_tag) $(build_remote_tag)
docker push $(build_remote_tag)

# ____ _ ____ _ _
# / ___| ___ ___ __ _| | ___ / ___| | ___ _ _ __| |
# | | _ / _ \ / _ \ / _` | |/ _ \ | | | |/ _ \| | | |/ _` |
# | |_| | (_) | (_) | (_| | | __/ | |___| | (_) | |_| | (_| |
# \____|\___/ \___/ \__, |_|\___| \____|_|\___/ \__,_|\__,_|
# |___/

# Initialise the gcloud login and project configuration, if you are working with GCP
gcloud-init: ensure-build-config
docker run --rm -it \
Expand Down Expand Up @@ -213,7 +226,67 @@ gcloud-auth-docker: ensure-build-image
sudo mv /tmp/gcloud-auth-docker/.dockercfg ~/
sudo chown $(USER) ~/.dockercfg

# Clean the kubernetes and gcloud configuration
clean-config:
-sudo rm -r $(build_path)/.kube
-sudo rm -r $(build_path)/.config
# Clean the gcloud configuration stored under the build directory.
# NOTE(review): sudo is presumably needed because the build container writes
# these files as root — confirm. The leading `-` lets the target succeed
# even if the directory does not exist.
clean-gcloud-config:
	-sudo rm -r $(build_path)/.config

# __ __ _ _ _ _
# | \/ (_)_ __ (_) | ___ _| |__ ___
# | |\/| | | '_ \| | |/ / | | | '_ \ / _ \
# | | | | | | | | | <| |_| | |_) | __/
# |_| |_|_|_| |_|_|_|\_\\__,_|_.__/ \___|
#

# Switches to an agon profile, and starts a kubernetes cluster
# of the right version. Also mounts the project directory into minikube,
# so that the build tools will work.
#
# Use DRIVER variable to change the VM driver (default virtualbox) if you so desire.
#
# NOTE: `minikube mount` runs in the foreground, so this target keeps the
# terminal occupied for as long as the mount is needed.
minikube-test-cluster: DRIVER := virtualbox
minikube-test-cluster: minikube-agon-profile
	minikube start --kubernetes-version v1.8.0 --vm-driver $(DRIVER)
	$(MAKE) minikube-ensure-build-image
	minikube mount $(agon_path):$(agon_path)

# Switch to (or create) the $(MINIKUBE_PROFILE) minikube profile ("agon" by
# default), keeping the Agon test cluster separate from other minikube clusters.
minikube-agon-profile:
	minikube profile $(MINIKUBE_PROFILE)

# Connecting to minikube requires some enhanced permissions, so use this target
# instead of `make shell` to start an interactive shell for development on minikube.
# Runs the shell container on the host network and mounts ~/.minikube into it,
# so kubectl inside the container can reach the cluster and its credentials.
minikube-shell: ensure-build-image
	eval $$(minikube docker-env --unset) && \
	$(MAKE) shell ARGS="--network=host -v ~/.minikube:$(HOME)/.minikube"

# Convenience target to build Agon's docker images directly on minikube.
# Points the docker client at minikube's docker daemon (via docker-env),
# then runs the standard `build-images` target against it.
minikube-build: minikube-ensure-build-image
	eval $$(minikube docker-env) && \
	$(MAKE) build-images

# Ensure minikube has the build image; if not, transfer it from the local docker.
# The command substitution is quoted so that `[ -z ... ]` still receives a single
# argument (rather than erroring) if `minikube ssh` ever returns multiple image
# ids or surrounding whitespace.
minikube-ensure-build-image: ensure-build-image
	@if [ -z "$$(minikube ssh -- docker images -q $(build_tag))" ]; then\
	echo "Could not find $(build_tag) image. Transferring...";\
	$(MAKE) minikube-transfer TAG=$(build_tag);\
	fi

# Instead of building Agon's docker images inside minikube,
# use this command to push the local images that have already been built
# via `make build` or `make build-images`.
#
# Depending on the virtualisation driver/configuration,
# it may be faster to build locally and push, rather than building directly on minikube.
#
# Transfers the sidecar and controller images; the build image itself is
# handled separately by `minikube-ensure-build-image`.
minikube-push:
	$(MAKE) minikube-transfer TAG=$(sidecar_tag)
	$(MAKE) minikube-transfer TAG=$(controller_tag)

# Installs the current development version of Agon into the Kubernetes cluster.
# Use this instead of `make install`, as it disables PullAlways on the install.yaml
# (ALWAYS_PULL_SIDECAR=false, IMAGE_PULL_POLICY=IfNotPresent), so the images
# loaded into minikube are used rather than pulled from a registry.
# Mounts ~/.minikube into the container at $(HOME)/.minikube — matching the
# mount in `minikube-shell` — so kubectl can find the cluster credentials.
minikube-install: ensure-build-image
	eval $$(minikube docker-env --unset) && \
	$(MAKE) install ARGS="--network=host -v ~/.minikube:$(HOME)/.minikube" ALWAYS_PULL_SIDECAR=false IMAGE_PULL_POLICY=IfNotPresent

# Convenience target for transferring images into minikube:
# `docker save`s $(TAG) from the host docker daemon and pipes it into a
# `docker load` run against minikube's docker daemon (via docker-env).
minikube-transfer:
	eval $$(minikube docker-env --unset) && \
	docker save $(TAG) | (eval $$(minikube docker-env) && docker load)
112 changes: 102 additions & 10 deletions build/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,10 @@ Rather than installing all the dependencies locally, you can test and build Agon
built from the Dockerfile in this directory. There is an accompanying Makefile for all the common
tasks you may wish to accomplish.

**Note** - this has been tested on Linux. Tickets for [OSX](https://github.com/googleprivate/agon/issues/46)
and [Windows](https://github.com/googleprivate/agon/issues/47) exist, and require work. Testing on these platforms
and reporting bugs is appreciated.

<!-- ToC start -->
## Table of Contents

Expand All @@ -18,12 +22,13 @@ tasks you may wish to accomplish.
1. [Make Variable Reference](#make-variable-reference)
1. [VERSION](#version)
1. [REGISTRY](#registry)
1. [KUBECONFIG](#kubeconfig)
1. [KUBEPATH](#kubepath)
1. [CLUSTER_NAME](#cluster_name)
1. [Make Target Reference](#make-target-reference)
1. [Development Targets](#development-targets)
1. [Build Image Targets](#build-image-targets)
1. [Google Cloud Platform](#google-cloud-platform)
1. [Minikube](#minikube)
<!-- ToC end -->

## GOPATH
Expand Down Expand Up @@ -69,8 +74,8 @@ to be open to UDP traffic.

First step is to create a Google Cloud Project at https://console.cloud.google.com or reuse an existing one.

The build tools (by default) maintain configuration for gcloud and kubectl within the `build` folder, so as to keep
everything seperate (see below for overwriting these config locations). Therefore, once the project has been created,
The build tools (by default) maintain configuration for gcloud within the `build` folder, so as to keep
everything separate (see below for overwriting these config locations). Therefore, once the project has been created,
we will need to authenticate our gcloud tooling against it. To do that run `make gcloud-init` and fill in the
prompts as directed.

Expand All @@ -81,8 +86,8 @@ done you can go to the Google Cloud Platform console and see that a cluster is u
name of the test cluster, you can set the `CLUSTER_NAME` environment variable to the value you would like.

To grab the kubectl authentication details for this cluster, run `make gcloud-auth-cluster`, which will generate the
required Kubernetes security credintials for `kubectl`. This will be stored in `build/.kube` by default, but can also be
overwritten by setting the `KUBECONFIG` environment variable before running the command.
required Kubernetes security credentials for `kubectl`. This will be stored in `~/.kube` by default, but can also be
overwritten by setting the `KUBEPATH` environment variable before running the command.

Great! Now we are setup, let's try out the development shell, and see if our `kubectl` is working!

Expand All @@ -106,11 +111,60 @@ To push our images up at this point, is simple `make push` and that will push up
project's container registry.

Now that the images are pushed, to install the development version (with all imagePolicies set to always download),
run `make install` and agon will install the image that you just built and pushed on the test cluster you
run `make install` and Agon will install the image that you just built and pushed on the test cluster you
created at the beginning of this section. (if you want to see the resulting installation yaml, you can find it in `build/.install.yaml`)

### Running a Test Minikube cluster
(Coming soon: Track [this bug](https://github.com/googleprivate/agon/issues/30) for details)
This will set up a [Minikube](https://github.com/kubernetes/minikube) cluster, running on an `agon` profile.

Because Minikube runs on a virtualisation layer on the host, some of the standard build and development Make targets
need to be replaced by Minikube specific targets.

First, [install Minikube](https://github.com/kubernetes/minikube#installation), which may also require you to install
a virtualisation solution, such as [VirtualBox](https://www.virtualbox.org) as well.

Next we will create the Agon Minikube cluster. Run `make minikube-test-cluster` to create an `agon` profile,
create a Kubernetes cluster under this profile of the supported version,
and mount the development code inside the Minikube instance so we are able to build Agon inside Minikube.

This will also install the kubectl authentication credentials in `~/.kube`, and set the
[`kubectl` context](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)
to `agon`.

Great! Now we are setup, let's try out the development shell, and see if our `kubectl` is working!

Run `make minikube-shell` to enter the development shell. You should see a bash shell that has you as the root user.
Enter `kubectl get pods` and press enter. You should see that you have no resources currently, but otherwise see no errors.
Assuming that all works, let's exit the shell by typing `exit` and hitting enter, and look at a couple of
options for building, pushing and installing Agon next.

There are two options for building Agon, and depending on your virtualisation solution and its configuration
each has its pros and cons.

#### Building directly on Minikube
Since Minikube allows you to [reuse its Docker daemon](https://github.com/kubernetes/minikube/blob/master/docs/reusing_the_docker_daemon.md)
we can build our images to run Agon directly on Minikube!

To do this, run `make minikube-build`, which will transfer the build image into the cluster
and run the `build-images` target on the Minikube instance, creating the images required to run Agon.

Again depending on your virtualisation layer, you may want to configure it to allow it to have access to more
cores and/or memory than the default, to allow for faster compilation (or for it to compile at all).

#### Pushing locally built images to Minikube
You may remember in the first part of this walkthrough, we ran `make build`, which created all the images and binaries
we needed to work with Agon locally. So instead of rebuilding them, can we push them straight into Minikube?

You bet we can!

Run `make minikube-push` which will send all of Agon's docker images from your local Docker into the Agon Minikube
instance.

This may be a better option if you find building on Minikube slow, or you just prefer to build locally.

Now that the images are pushed, to install the development version,
run `make minikube-install` and Agon will install the images that you built and pushed to the Agon Minikube instance
created at the beginning of this section. (if you want to see the resulting installation yaml, you can find it in `build/.install.yaml`)

### Next Steps

Expand All @@ -124,8 +178,9 @@ The version of this build. Version defaults to the short hash of the latest comm
### REGISTRY
The registry that is being used to store docker images. Defaults to gcr.io/agon-images - the release + CI registry.

### KUBECONFIG
Where the kubectl configuration files are being stored for shell and kubectl targets. Defaults to build/.kube
### KUBEPATH
The directory the kubectl configuration files are being stored for shell and kubectl targets.
Defaults to ~/.kube (where your Kubernetes configs are likely to already exist)

### CLUSTER_NAME
The (gcloud) test cluster that is being worked against. Defaults to `test-cluster`
Expand Down Expand Up @@ -156,6 +211,9 @@ Run all tests
#### `make push`
Pushes all built images up to the `$(REGISTRY)`

#### `make install`
Installs the current development version of Agon into the Kubernetes cluster

#### `make shell`
Run a bash shell with the developer tools (go tooling, kubectl, etc) and source code in it.

Expand Down Expand Up @@ -189,7 +247,7 @@ Creates the build docker image

### Google Cloud Platform

A set of utilities for setting up a Container Engine cluster on Google Cloud Platform,
A set of utilities for setting up a Kubernetes Engine cluster on Google Cloud Platform,
since it's an easy way to get a test cluster working with Kubernetes.

#### `make gcloud-init`
Expand All @@ -205,3 +263,37 @@ Pulls down authentication information for kubectl against a cluster, name can be
#### `make gcloud-auth-docker`
Creates a short lived access to Google Cloud container repositories, so that you are able to call
`docker push` directly. Useful when used in combination with `make push` command.

### Minikube

A set of utilities for setting up and running a [Minikube](https://github.com/kubernetes/minikube) instance,
for local development.

Since Minikube runs locally, there are some targets that need to be used instead of the standard ones above.

#### `minikube-test-cluster`
Switches to an "agon" profile, and starts a kubernetes cluster
of the right version. Also mounts the project directory into Minikube,
so that the build tools will work.

Use DRIVER variable to change the VM driver (default virtualbox) if you so desire.

#### `minikube-build`
Convenience target to build Agon's docker images directly on Minikube.

#### `minikube-push`
Instead of building Agon's docker images inside Minikube,
use this command to push the local images that have already been built
via `make build` or `make build-images`.

#### `minikube-install`
Installs the current development version of Agon into the Kubernetes cluster.
Use this instead of `make install`, as it disables PullAlways on the install.yaml

#### `minikube-shell`
Connecting to Minikube requires some enhanced permissions, so use this target
instead of `make shell` to start an interactive shell for development on Minikube.

Depending on the virtualisation driver/configuration,
it may be faster to build locally and push, rather than building directly on Minikube.

4 changes: 2 additions & 2 deletions build/install.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -45,10 +45,10 @@ spec:
containers:
- name: gameservers-controller
image: ${REGISTRY}/gameservers-controller:${VERSION}
imagePullPolicy: Always
imagePullPolicy: ${IMAGE_PULL_POLICY}
env:
- name: ALWAYS_PULL_SIDECAR # set the sidecar imagePullPolicy to Always
value: "true"
value: "${ALWAYS_PULL_SIDECAR}"
- name: SIDECAR # overwrite the GameServer sidecar image that is used
value: ${REGISTRY}/gameservers-sidecar:${VERSION}
- name: MIN_PORT
Expand Down
2 changes: 1 addition & 1 deletion examples/cpp-simple/gameserver.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -25,4 +25,4 @@ spec:
containers:
- name: cpp-simple
image: gcr.io/agon-images/cpp-simple-server:0.1
imagePullPolicy: Always
# imagePullPolicy: Always # add for development
2 changes: 1 addition & 1 deletion examples/simple-udp/server/gameserver.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -25,4 +25,4 @@ spec:
containers:
- name: simple-udp
image: gcr.io/agon-images/udp-server:0.1
imagePullPolicy: Always
# imagePullPolicy: Always # add for development
21 changes: 16 additions & 5 deletions gameservers/controller/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -401,9 +401,9 @@ func (c *Controller) syncGameServerRequestReadyState(gs *stablev1alpha1.GameServ
if err != nil {
return gs, errors.Wrapf(err, "error getting pod for GameServer %s", gs.ObjectMeta.Name)
}
addr, err := c.externalIP(pod)
addr, err := c.Address(pod)
if err != nil {
return gs, errors.Wrapf(err, "error getting external ip for GameServer %s", gs.ObjectMeta.Name)
return gs, errors.Wrapf(err, "error getting external Address for GameServer %s", gs.ObjectMeta.Name)
}

gsCopy := gs.DeepCopy()
Expand Down Expand Up @@ -479,8 +479,11 @@ func (c *Controller) listGameServerPods(gs *stablev1alpha1.GameServer) ([]*corev
return result, nil
}

// ExternalIP returns the external IP that the given Pod is being run on
func (c Controller) externalIP(pod *corev1.Pod) (string, error) {
// Address returns the IP that the given Pod is being run on
// This should be the externalIP, but if the externalIP is
// not set, it will fall back to the internalIP with a warning.
// (basically because minikube only has an internalIP)
func (c Controller) Address(pod *corev1.Pod) (string, error) {
node, err := c.nodeLister.Get(pod.Spec.NodeName)
if err != nil {
return "", errors.Wrapf(err, "error retrieving node %s for Pod %s", node.ObjectMeta.Name, pod.ObjectMeta.Name)
Expand All @@ -492,7 +495,15 @@ func (c Controller) externalIP(pod *corev1.Pod) (string, error) {
}
}

return "", errors.Errorf("Could not find an external ip for Node: #%s", node.ObjectMeta.Name)
// minikube only has an InternalIP on a Node, so we'll fall back to that.
logrus.WithField("node", node.ObjectMeta.Name).Warn("Could not find ExternalIP. Falling back to Internal")
for _, a := range node.Status.Addresses {
if a.Type == corev1.NodeInternalIP {
return a.Address, nil
}
}

return "", errors.Errorf("Could not find an Address for Node: %s", node.ObjectMeta.Name)
}

// waitForEstablishedCRD blocks until CRD comes to an Established state.
Expand Down
Loading