Skip to content
This repository has been archived by the owner on Apr 17, 2019. It is now read-only.

Convert registry to k8s.gcr.io #2814

Merged
merged 2 commits into from
Feb 2, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ spec:
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/elasticsearch:v2.4.1
- image: k8s.gcr.io/elasticsearch:v2.4.1
name: elasticsearch-logging
resources:
# need more cpu upon initialization, therefore burstable class
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ spec:
spec:
containers:
- name: fluentd-es
image: gcr.io/google_containers/fluentd-elasticsearch:1.20
image: k8s.gcr.io/fluentd-elasticsearch:1.20
command:
- '/bin/sh'
- '-c'
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ spec:
spec:
containers:
- name: kibana-logging
image: gcr.io/google_containers/kibana:v4.6.1
image: k8s.gcr.io/kibana:v4.6.1
resources:
# keep request = limit to keep this container in guaranteed class
limits:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ spec:
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- image: gcr.io/google_containers/heapster:v1.2.0
- image: k8s.gcr.io/heapster:v1.2.0
name: heapster
livenessProbe:
httpGet:
Expand All @@ -45,7 +45,7 @@ spec:
- --logtostderr=true
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: gcr.io/google_containers/heapster:v1.2.0
- image: k8s.gcr.io/heapster:v1.2.0
name: eventer
resources:
# keep request = limit to keep this container in guaranteed class
Expand All @@ -60,7 +60,7 @@ spec:
- --logtostderr=true
- --source=kubernetes:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: gcr.io/google_containers/addon-resizer:1.6
- image: k8s.gcr.io/addon-resizer:1.6
name: heapster-nanny
resources:
limits:
Expand Down Expand Up @@ -89,7 +89,7 @@ spec:
- --container=heapster
- --poll-period=300000
- --estimator=exponential
- image: gcr.io/google_containers/addon-resizer:1.6
- image: k8s.gcr.io/addon-resizer:1.6
name: eventer-nanny
resources:
limits:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ spec:
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/heapster_influxdb:v0.7
- image: k8s.gcr.io/heapster_influxdb:v0.7
name: influxdb
resources:
# keep request = limit to keep this container in guaranteed class
Expand All @@ -37,7 +37,7 @@ spec:
volumeMounts:
- name: influxdb-persistent-storage
mountPath: /data
- image: gcr.io/google_containers/heapster_grafana:v3.1.1
- image: k8s.gcr.io/heapster_grafana:v3.1.1
name: grafana
env:
resources:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ spec:
spec:
containers:
- name: kubedns
image: gcr.io/google_containers/kubedns-{{ (ansible_architecture == 'x86_64') | ternary('amd64', ansible_architecture) }}:1.8
image: k8s.gcr.io/kubedns-{{ (ansible_architecture == 'x86_64') | ternary('amd64', ansible_architecture) }}:1.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
Expand Down Expand Up @@ -83,7 +83,7 @@ spec:
name: dns-tcp-local
protocol: TCP
- name: dnsmasq
image: gcr.io/google_containers/kube-dnsmasq-{{ (ansible_architecture == 'x86_64') | ternary('amd64', ansible_architecture) }}:1.4
image: k8s.gcr.io/kube-dnsmasq-{{ (ansible_architecture == 'x86_64') | ternary('amd64', ansible_architecture) }}:1.4
livenessProbe:
httpGet:
path: /healthz-dnsmasq
Expand All @@ -106,7 +106,7 @@ spec:
name: dns-tcp
protocol: TCP
- name: healthz
image: gcr.io/google_containers/exechealthz-{{ (ansible_architecture == 'x86_64') | ternary('amd64', ansible_architecture) }}:1.2
image: k8s.gcr.io/exechealthz-{{ (ansible_architecture == 'x86_64') | ternary('amd64', ansible_architecture) }}:1.2
resources:
limits:
memory: 50Mi
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-{{ (ansible_architecture == 'x86_64') | ternary('amd64', ansible_architecture) }}:v1.4.2
image: k8s.gcr.io/kubernetes-dashboard-{{ (ansible_architecture == 'x86_64') | ternary('amd64', ansible_architecture) }}:v1.4.2
resources:
# keep request = limit to keep this container in guaranteed class
limits:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-{{ (ansible_architecture == 'x86_64') | ternary('amd64', ansible_architecture) }}:v1.1.0
image: k8s.gcr.io/kubernetes-dashboard-{{ (ansible_architecture == 'x86_64') | ternary('amd64', ansible_architecture) }}:v1.1.0
resources:
# keep request = limit to keep this container in guaranteed class
limits:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ spec:
hostNetwork: true
containers:
- name: node-problem-detector
image: gcr.io/google_containers/node-problem-detector:v0.1
image: k8s.gcr.io/node-problem-detector:v0.1
env:
# Config the host ip and port of apiserver.
- name: "KUBERNETES_SERVICE_HOST"
Expand Down
2 changes: 1 addition & 1 deletion ansible/roles/node/defaults/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ kube_proxy_options:

kubelet_additional_options: []
# - "--host-network-sources=*"
# - "--pod-infra-container-image=gcr.io/google_containers/pause:2.0"
# - "--pod-infra-container-image=k8s.gcr.io/pause:2.0"

kube_proxy_additional_options: []

Expand Down

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 2 additions & 2 deletions election/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,10 @@ all: push
# 0.0 shouldn't clobber any released builds
# current latest is 0.5
TAG = 0.0
PREFIX = gcr.io/google_containers/leader-elector
PREFIX = staging-k8s.gcr.io/leader-elector

NODEJS_TAG = 0.1
NODEJS_PREFIX = gcr.io/google_containers/nodejs-election-client
NODEJS_PREFIX = staging-k8s.gcr.io/nodejs-election-client

server:
CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-w' -o server example/main.go
Expand Down
4 changes: 2 additions & 2 deletions election/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ Annotations - Every API object can be annotated with arbitrary key/value pairs t
Given these primitives, the code to use master election is relatively straightforward, and you can find it here. Let’s run it ourselves.

```console
$ kubectl run leader-elector --image=gcr.io/google_containers/leader-elector:0.5 --replicas=3 -- --election=example
$ kubectl run leader-elector --image=k8s.gcr.io/leader-elector:0.5 --replicas=3 -- --election=example
```

This creates a leader election set with 3 replicas:
Expand Down Expand Up @@ -53,7 +53,7 @@ The leader-election container provides a simple webserver that can serve on any
$ kubectl delete rc leader-elector

# create the new group, note the --http=localhost:4040 flag
$ kubectl run leader-elector --image=gcr.io/google_containers/leader-elector:0.5 --replicas=3 -- --election=example --http=0.0.0.0:4040
$ kubectl run leader-elector --image=k8s.gcr.io/leader-elector:0.5 --replicas=3 -- --election=example --http=0.0.0.0:4040

# create a proxy to your Kubernetes api server
$ kubectl proxy
Expand Down
4 changes: 2 additions & 2 deletions election/client/nodejs/pod.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ metadata:
name: elector-sidecar
spec:
containers:
- image: gcr.io/google_containers/nodejs-election-client:0.1
- image: k8s.gcr.io/nodejs-election-client:0.1
imagePullPolicy: IfNotPresent
name: nodejs
ports:
Expand All @@ -14,7 +14,7 @@ spec:
resources:
requests:
cpu: 100m
- image: gcr.io/google_containers/leader-elector:0.5
- image: k8s.gcr.io/leader-elector:0.5
imagePullPolicy: IfNotPresent
name: elector
args:
Expand Down
2 changes: 1 addition & 1 deletion exec-healthz/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ BIN := exechealthz
PKG := k8s.io/contrib/exec-healthz

# Where to push the docker image.
REGISTRY ?= gcr.io/google_containers
REGISTRY ?= staging-k8s.gcr.io

# Which architecture to build - see $(ALL_ARCH) for options.
ARCH ?= amd64
Expand Down
12 changes: 6 additions & 6 deletions exec-healthz/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,19 +12,19 @@ How to build and push all images:
# Build for linux/amd64 (default)
$ make push TAG=1.0
$ make push TAG=1.0 ARCH=amd64
# ---> gcr.io/google_containers/exechealthz-amd64:1.0
# ---> staging-k8s.gcr.io/exechealthz-amd64:1.0

$ make push-legacy TAG=1.0 ARCH=amd64
# ---> gcr.io/google_containers/exechealthz:1.0 (image with backwards compatible naming)
# ---> staging-k8s.gcr.io/exechealthz:1.0 (image with backwards compatible naming)

$ make push TAG=1.0 ARCH=arm
# ---> gcr.io/google_containers/exechealthz-arm:1.0
# ---> staging-k8s.gcr.io/exechealthz-arm:1.0

$ make push TAG=1.0 ARCH=arm64
# ---> gcr.io/google_containers/exechealthz-arm64:1.0
# ---> staging-k8s.gcr.io/exechealthz-arm64:1.0

$ make push TAG=1.0 ARCH=ppc64le
# ---> gcr.io/google_containers/exechealthz-ppc64le:1.0
# ---> staging-k8s.gcr.io/exechealthz-ppc64le:1.0
```
Of course, if you don't want to push the images, just run `make` or `make container`

Expand Down Expand Up @@ -84,7 +84,7 @@ Create a pod.json that looks like:
"containers": [
{
"name": "healthz",
"image": "gcr.io/google_containers/exechealthz:1.0",
"image": "k8s.gcr.io/exechealthz:1.0",

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

except here... ?

"args": [
"-cmd=nslookup localhost"
],
Expand Down
2 changes: 1 addition & 1 deletion exec-healthz/pod.json
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
"containers": [
{
"name": "healthz",
"image": "gcr.io/google_containers/exechealthz-amd64:1.2",
"image": "k8s.gcr.io/exechealthz-amd64:1.2",
"args": [
"-cmd=nslookup localhost"
],
Expand Down
2 changes: 1 addition & 1 deletion flannel-server/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ all: push

# 0.0 shouldn't clobber any released builds
TAG = 0.0
PREFIX = gcr.io/google_containers/flannel-server-helper
PREFIX = staging-k8s.gcr.io/flannel-server-helper

server: main.go
CGO_ENABLED=0 GOOS=linux godep go build -a -installsuffix cgo -ldflags '-w' -o flannel_helper ./main.go
Expand Down
2 changes: 1 addition & 1 deletion flannel-server/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ Mount your network configuration into the pod. Example configuration:

Pass the appropriate command line arguments to the flannel-server-helper, for example the rc in this directory has:
```yaml
- image: gcr.io/google_containers/flannel-server-helper:0.1
- image: k8s.gcr.io/flannel-server-helper:0.1
args:
- --network-config /network.json
- --etcd-prefix /kubernetes.io/network
Expand Down
2 changes: 1 addition & 1 deletion flannel-server/flannel-helper.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ spec:
initialDelaySeconds: 30
timeoutSeconds: 5
# Etcd stores flannel network/subnet configuration
- image: gcr.io/google_containers/etcd:2.2.1
- image: k8s.gcr.io/etcd:2.2.1
command:
- /bin/sh
- -c
Expand Down
2 changes: 1 addition & 1 deletion for-demos/proxy-to-service/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
.PHONY: all container push

TAG = v2
PREFIX = gcr.io/google_containers
PREFIX = staging-k8s.gcr.io
NAME = proxy-to-service

all: container
Expand Down
6 changes: 3 additions & 3 deletions for-demos/proxy-to-service/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ also cannot choose to expose it on some nodes but not others. These things
will be fixed in the future, but until then, here is a stop-gap measure you can
use.

The container image `gcr.io/google_containers/proxy-to-service:v2` is a very
The container image `k8s.gcr.io/proxy-to-service:v2` is a very
small container that will do port-forwarding for you. You can use it to
forward a pod port or a host port to a service. Pods can choose any port or
host port, and are not limited in the same way Services are.
Expand All @@ -23,15 +23,15 @@ metadata:
spec:
containers:
- name: proxy-udp
image: gcr.io/google_containers/proxy-to-service:v2
image: k8s.gcr.io/proxy-to-service:v2
args: [ "udp", "53", "kube-dns.default", "1" ]
ports:
- name: udp
protocol: UDP
containerPort: 53
hostPort: 53
- name: proxy-tcp
image: gcr.io/google_containers/proxy-to-service:v2
image: k8s.gcr.io/proxy-to-service:v2
args: [ "tcp", "53", "kube-dns.default" ]
ports:
- name: tcp
Expand Down
2 changes: 1 addition & 1 deletion images/haproxy/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
# limitations under the License.


FROM gcr.io/google_containers/ubuntu-slim:0.3
FROM k8s.gcr.io/ubuntu-slim:0.3
MAINTAINER Prashanth B <[email protected]>

RUN apt-get update && apt-get install -y --no-install-recommends \
Expand Down
2 changes: 1 addition & 1 deletion images/haproxy/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ all: push

# 0.0.0 shouldn't clobber any released builds
TAG = 0.5
PREFIX = gcr.io/google_containers/haproxy
PREFIX = staging-k8s.gcr.io/haproxy
HAPROXY_IMAGE = haproxy

container:
Expand Down
2 changes: 1 addition & 1 deletion images/haproxy/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ This image does provide a default configuration file with no backend servers.

*Using docker*
```
$ docker run -v /some/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro gcr.io/google_containers/haproxy:0.5
$ docker run -v /some/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro k8s.gcr.io/haproxy:0.5
```

*Creating a pod*
Expand Down
2 changes: 1 addition & 1 deletion images/haproxy/pod.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ metadata:
name: simple
spec:
containers:
- image: gcr.io/google_containers/haproxy:0.5
- image: k8s.gcr.io/haproxy:0.5
imagePullPolicy: Always
name: haproxy
command:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ spec:
spec:
containers:
- name: echoheaders
image: gcr.io/google_containers/echoserver:1.4
image: k8s.gcr.io/echoserver:1.4
ports:
- containerPort: 8080
readinessProbe:
Expand Down
2 changes: 1 addition & 1 deletion ingress/controllers/gce/examples/https/tls-app.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ spec:
spec:
containers:
- name: echoheaders-https
image: gcr.io/google_containers/echoserver:1.3
image: k8s.gcr.io/echoserver:1.3
ports:
- containerPort: 8080
---
Expand Down
2 changes: 1 addition & 1 deletion ingress/controllers/nginx/examples/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
All the examples references the services `echoheaders-x` and `echoheaders-y`

```
kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.4 --replicas=1 --port=8080
kubectl run echoheaders --image=k8s.gcr.io/echoserver:1.4 --replicas=1 --port=8080
kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x
kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-y
```
Loading