Merge pull request #36 from ucloud/v0.2.2
V0.2.2
gaopenghigh authored Mar 26, 2020
2 parents ef9da40 + 9bc4ace commit 6507405
Showing 29 changed files with 520 additions and 268 deletions.
2 changes: 2 additions & 0 deletions deploy/e2e.yml
@@ -30,6 +30,8 @@ spec:
             value: ""
           - name: S3_BUCKET
             value: ""
+          - name: CLUSTER_DOMAIN
+            value: ""

 ---
 apiVersion: rbac.authorization.k8s.io/v1
5 changes: 4 additions & 1 deletion go.mod
@@ -4,21 +4,24 @@ require (
github.com/appscode/go v0.0.0-20191006073906-e3d193d493fc
github.com/appscode/osm v0.12.0
github.com/aws/aws-sdk-go v1.20.20
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
github.com/go-logr/logr v0.1.0
github.com/go-openapi/spec v0.19.2
github.com/go-redis/redis v6.15.7+incompatible
github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9
github.com/onsi/ginkgo v1.8.0
github.com/onsi/gomega v1.5.0
github.com/operator-framework/operator-sdk v0.13.0
github.com/pkg/errors v0.8.1
github.com/satori/go.uuid v1.2.0
github.com/spf13/pflag v1.0.5
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
gomodules.xyz/stow v0.2.3
k8s.io/api v0.0.0
k8s.io/apimachinery v0.0.0
k8s.io/client-go v12.0.0+incompatible
k8s.io/kube-openapi v0.0.0-20190918143330-0270cf2f1c1d
k8s.io/kubernetes v1.16.2
kmodules.xyz/constants v0.0.0-20191024095500-cd4313df4aa6
kmodules.xyz/objectstore-api v0.0.0-20191014210450-ac380fa650a3
sigs.k8s.io/controller-runtime v0.4.0
)
30 changes: 30 additions & 0 deletions go.sum

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion hack/docker/redis-tools/Dockerfile
@@ -8,7 +8,7 @@ RUN set -x \
     zip \
     && rm -rf /var/lib/apt/lists/* /usr/share/doc /usr/share/man /tmp/*

-COPY osm /usr/local/bin/osm
+COPY rclone /usr/local/bin/rclone
 COPY redis-tools.sh /usr/local/bin/redis-tools.sh
 RUN chmod +x /usr/local/bin/redis-tools.sh
16 changes: 8 additions & 8 deletions hack/docker/redis-tools/make.sh
@@ -17,27 +17,27 @@ IMG=redis-tools
 DB_VERSION=5.0.4
 TAG="$DB_VERSION"

-OSM_VER=${OSM_VER:-v1.50.2}
+RCLONE_VER=${RCLONE_VER:-v1.50.2}

 DIST=$REPO_ROOT/dist
 mkdir -p $DIST

 build() {
     pushd "$REPO_ROOT/hack/docker/redis-tools"

-    if [ ! -f "osm" ]; then
+    if [ ! -f "rclone" ]; then
         # Download rclone
-        wget https://downloads.rclone.org/"${OSM_VER}"/rclone-"${OSM_VER}"-linux-amd64.zip
-        unzip rclone-"${OSM_VER}"-linux-amd64.zip
-        chmod +x rclone-"${OSM_VER}"-linux-amd64/rclone
-        mv rclone-"${OSM_VER}"-linux-amd64/rclone osm
+        wget https://downloads.rclone.org/"${RCLONE_VER}"/rclone-"${RCLONE_VER}"-linux-amd64.zip
+        unzip rclone-"${RCLONE_VER}"-linux-amd64.zip
+        chmod +x rclone-"${RCLONE_VER}"-linux-amd64/rclone
+        mv rclone-"${RCLONE_VER}"-linux-amd64/rclone rclone
     fi

     local cmd="docker build --pull -t $DOCKER_REGISTRY/$IMG:$TAG ."
     echo $cmd; $cmd

-    rm -rf rclone-"${OSM_VER}"-linux-amd64*
-    rm osm
+    rm -rf rclone-"${RCLONE_VER}"-linux-amd64*
+    rm rclone
     popd
 }
14 changes: 10 additions & 4 deletions hack/docker/redis-tools/redis-tools.sh
@@ -14,6 +14,7 @@ show_help() {
     echo "    --host=HOST          database host"
     echo "    --user=USERNAME      database username"
     echo "    --bucket=BUCKET      name of bucket"
+    echo "    --location=LOCATION  location of backend (<provider>:<bucket name>)"
     echo "    --folder=FOLDER      name of folder in bucket"
     echo "    --snapshot=SNAPSHOT  name of snapshot"
 }
@@ -25,11 +26,12 @@ REDIS_PORT=${REDIS_PORT:-6379}
 REDIS_USER=${REDIS_USER:-}
 REDIS_PASSWORD=${REDIS_PASSWORD:-}
 REDIS_BUCKET=${REDIS_BUCKET:-}
+REDIS_LOCATION=${REDIS_LOCATION:-}
 REDIS_FOLDER=${REDIS_FOLDER:-}
 REDIS_SNAPSHOT=${REDIS_SNAPSHOT:-}
 REDIS_DATA_DIR=${REDIS_DATA_DIR:-/data}
 REDIS_RESTORE_SUCCEEDED=${REDIS_RESTORE_SUCCEEDED:-0}
-OSM_CONFIG_FILE=/etc/osm/config
+RCLONE_CONFIG_FILE=/etc/rclone/config

 op=$1
 shift
@@ -56,6 +58,10 @@ while test $# -gt 0; do
         export REDIS_BUCKET=$(echo $1 | sed -e 's/^[^=]*=//g')
         shift
         ;;
+    --location*)
+        export REDIS_LOCATION=$(echo $1 | sed -e 's/^[^=]*=//g')
+        shift
+        ;;
     --folder*)
         export REDIS_FOLDER=$(echo $1 | sed -e 's/^[^=]*=//g')
         shift
@@ -108,7 +114,7 @@ case "$op" in
     ls -lh "$SOURCE_DIR"
     echo "Uploading dump file to the backend......."
     echo "From $SOURCE_DIR"
-    osm --config "$OSM_CONFIG_FILE" copy "$SOURCE_DIR" ceph:"$REDIS_BUCKET"/"$REDIS_FOLDER/$REDIS_SNAPSHOT" -v
+    rclone --config "$RCLONE_CONFIG_FILE" copy "$SOURCE_DIR" "$REDIS_LOCATION"/"$REDIS_FOLDER/$REDIS_SNAPSHOT" -v

     echo "Backup successful"
     ;;
@@ -120,9 +126,9 @@ case "$op" in
     fi
     index=$(echo "${POD_NAME}" | awk -F- '{print $(NF-1)}')
     REDIS_SNAPSHOT=${REDIS_SNAPSHOT}-${index}
-    SOURCE_SNAPSHOT="$REDIS_BUCKET"/"$REDIS_FOLDER/$REDIS_SNAPSHOT"
+    SOURCE_SNAPSHOT="$REDIS_LOCATION"/"$REDIS_FOLDER/$REDIS_SNAPSHOT"
     echo "From $SOURCE_SNAPSHOT"
-    osm --config "$OSM_CONFIG_FILE" sync ceph:"$SOURCE_SNAPSHOT" "$REDIS_DATA_DIR" -v
+    rclone --config "$RCLONE_CONFIG_FILE" sync "$SOURCE_SNAPSHOT" "$REDIS_DATA_DIR" -v

     echo "Recovery successful"
     ;;
5 changes: 5 additions & 0 deletions hack/e2e.sh
@@ -9,6 +9,11 @@ if [[ -z ${STORAGECLASSNAME} ]]; then
     exit 1
 fi

+if [[ -z ${CLUSTER_DOMAIN} ]]; then
+    echo "env CLUSTER_DOMAIN not set"
+    exit 1
+fi
+
 if [[ -z ${GINKGO_SKIP} ]]; then
     export GINKGO_SKIP=""
 fi
11 changes: 11 additions & 0 deletions pkg/apis/redis/v1alpha1/constants.go
@@ -59,6 +59,17 @@
 	NodesPlacementInfoOptimal NodesPlacementInfo = "Optimal"
 )

+type RestorePhase string
+
+const (
+	// RestorePhaseRunning is used for a Restore that is currently running.
+	RestorePhaseRunning RestorePhase = "Running"
+	// RestorePhaseRestart is used for a Restore that is restarting the master nodes.
+	RestorePhaseRestart RestorePhase = "Restart"
+	// RestorePhaseSucceeded is used for a Restore that has succeeded.
+	RestorePhaseSucceeded RestorePhase = "Succeeded"
+)
+
 const (
 	DatabaseNamePrefix = "redis"
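Read together with the controller changes below, these phases form a short, linear lifecycle: an unset phase is initialized to Running, Running advances to Restart once the masters have loaded the RDB dump, and Restart advances to Succeeded after the masters come back up. A minimal sketch of that ordering (illustrative only — nextRestorePhase is a hypothetical helper, not part of this commit; the operator performs the transitions inline in its reconcile loop):

package sketch

// RestorePhase mirrors the type introduced in constants.go above.
type RestorePhase string

const (
	RestorePhaseRunning   RestorePhase = "Running"
	RestorePhaseRestart   RestorePhase = "Restart"
	RestorePhaseSucceeded RestorePhase = "Succeeded"
)

// nextRestorePhase shows the order in which the reconcile loop advances
// a restore; a terminal or unknown phase is left unchanged.
func nextRestorePhase(p RestorePhase) RestorePhase {
	switch p {
	case "": // no phase yet: initRestore sets Running
		return RestorePhaseRunning
	case RestorePhaseRunning: // dump loaded: controller sets Restart
		return RestorePhaseRestart
	case RestorePhaseRestart: // masters restarted: controller sets Succeeded
		return RestorePhaseSucceeded
	default:
		return p
	}
}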
24 changes: 20 additions & 4 deletions pkg/apis/redis/v1alpha1/default.go
@@ -70,7 +70,19 @@ func (in *DistributedRedisCluster) IsRestoreFromBackup() bool {
 }

 func (in *DistributedRedisCluster) IsRestored() bool {
-	return in.Status.Restore.RestoreSucceeded > 0
+	return in.Status.Restore.Phase == RestorePhaseSucceeded
 }

+func (in *DistributedRedisCluster) ShouldInitRestorePhase() bool {
+	return in.Status.Restore.Phase == ""
+}
+
+func (in *DistributedRedisCluster) IsRestoreRunning() bool {
+	return in.Status.Restore.Phase == RestorePhaseRunning
+}
+
+func (in *DistributedRedisCluster) IsRestoreRestarting() bool {
+	return in.Status.Restore.Phase == RestorePhaseRestart
+}
+
 func defaultResource() *v1.ResourceRequirements {
@@ -114,7 +126,7 @@ func (in *RedisClusterBackup) Validate() error {
 	return nil
 }

-func (in *RedisClusterBackup) Location() (string, error) {
+func (in *RedisClusterBackup) RemotePath() (string, error) {
 	spec := in.Spec.Backend
 	timePrefix := in.Status.StartTime.Format("20060102150405")
 	if spec.S3 != nil {
@@ -131,10 +143,14 @@ func (in *RedisClusterBackup) Location() (string, error) {
 	return "", fmt.Errorf("no storage provider is configured")
 }

-func (in *RedisClusterBackup) OSMSecretName() string {
-	return fmt.Sprintf("osmconfig-%v", in.Name)
+func (in *RedisClusterBackup) RCloneSecretName() string {
+	return fmt.Sprintf("rcloneconfig-%v", in.Name)
 }

 func (in *RedisClusterBackup) JobName() string {
 	return fmt.Sprintf("redisbackup-%v", in.Name)
 }
+
+func (in *RedisClusterBackup) IsRefLocalPVC() bool {
+	return in.Spec.Local != nil && in.Spec.Local.PersistentVolumeClaim != nil
+}
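A note on the layout string in RemotePath: Go's time package formats dates against the fixed reference time Mon Jan 2 15:04:05 MST 2006, so the layout "20060102150405" yields a compact yyyyMMddHHmmss timestamp of the backup's start time as the path prefix. A standalone illustration (not part of the diff):

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2020, time.March, 26, 10, 30, 0, 0, time.UTC)
	// Prints "20200326103000": year, month, day, hour, minute, second.
	fmt.Println(t.Format("20060102150405"))
}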
6 changes: 2 additions & 4 deletions pkg/apis/redis/v1alpha1/distributedrediscluster_types.go
@@ -116,10 +116,8 @@ type DistributedRedisClusterStatus struct {
 }

 type Restore struct {
-	// The number of restore which reached phase Succeeded.
-	RestoreSucceeded int32 `json:"restoreSucceeded,omitempty"`
-	Backup *RedisClusterBackup `json:"backup, omitempty"`
-	//BackupSourceSpec `json:",inline"`
+	Phase  RestorePhase        `json:"phase,omitempty"`
+	Backup *RedisClusterBackup `json:"backup, omitempty"`
 }

 // RedisClusterNode represents a RedisCluster node
pkg/controller/distributedrediscluster/distributedrediscluster_controller.go
@@ -249,10 +249,35 @@ func (r *ReconcileDistributedRedisCluster) Reconcile(request reconcile.Request)
 		return reconcile.Result{}, err
 	}

-	// update cr and wait for the next Reconcile loop
-	if instance.IsRestoreFromBackup() && !instance.IsRestored() {
+	// Mark .Status.Restore.Phase = RestorePhaseRestart: remove the init container
+	// and restore volume referenced in the statefulset (used to dump the RDB file
+	// from the backup), then restart the redis master nodes.
+	if instance.IsRestoreFromBackup() && instance.IsRestoreRunning() {
 		reqLogger.Info("update restore redis cluster cr")
-		instance.Status.Restore.RestoreSucceeded = 1
+		instance.Status.Restore.Phase = redisv1alpha1.RestorePhaseRestart
 		if err := r.crController.UpdateCRStatus(instance); err != nil {
 			return reconcile.Result{}, err
 		}
+		if err := r.ensurer.UpdateRedisStatefulsets(instance, getLabels(instance)); err != nil {
+			return reconcile.Result{}, err
+		}
+		waiter := &waitStatefulSetUpdating{
+			name:                  "waitMasterNodeRestarting",
+			timeout:               60 * time.Second,
+			tick:                  5 * time.Second,
+			statefulSetController: r.statefulSetController,
+			cluster:               instance,
+		}
+		if err := waiting(waiter, ctx.reqLogger); err != nil {
+			return reconcile.Result{}, err
+		}
+		return reconcile.Result{Requeue: true}, nil
+	}
+
+	// The restore has succeeded: update the cr and wait for the next Reconcile loop.
+	if instance.IsRestoreFromBackup() && instance.IsRestoreRestarting() {
+		reqLogger.Info("update restore redis cluster cr")
+		instance.Status.Restore.Phase = redisv1alpha1.RestorePhaseSucceeded
+		if err := r.crController.UpdateCRStatus(instance); err != nil {
+			return reconcile.Result{}, err
+		}
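The waitStatefulSetUpdating value above is handed to a waiting helper whose implementation is not part of this diff. A plausible standalone sketch of the pattern it implies — poll a condition every tick until it holds, fails, or a timeout expires — follows; the names, signature, and fake condition are assumptions for illustration, not the repo's actual API (the real waiter inspects statefulset and pod state through statefulSetController):

package main

import (
	"fmt"
	"time"
)

// checkFunc reports whether the awaited condition has been reached, e.g.
// whether the statefulset has finished recreating the master pods.
type checkFunc func() (bool, error)

// waiting polls check every tick until it succeeds, returns an error, or
// the timeout elapses.
func waiting(name string, timeout, tick time.Duration, check checkFunc) error {
	deadline := time.Now().Add(timeout)
	for {
		ok, err := check()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("%s: timed out after %s", name, timeout)
		}
		time.Sleep(tick)
	}
}

func main() {
	start := time.Now()
	err := waiting("waitMasterNodeRestarting", 60*time.Second, 5*time.Second,
		func() (bool, error) { return time.Since(start) > 12*time.Second, nil })
	fmt.Println(err) // prints <nil> once the fake condition turns true
}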
2 changes: 1 addition & 1 deletion pkg/controller/distributedrediscluster/status.go
@@ -144,7 +144,7 @@ func compareStatus(old, new *redisv1alpha1.DistributedRedisClusterStatus, reqLog
 		return true
 	}

-	if utils.CompareInt32("restoreSucceeded", old.Restore.RestoreSucceeded, new.Restore.RestoreSucceeded, reqLogger) {
+	if utils.CompareStringValue("restoreSucceeded", string(old.Restore.Phase), string(new.Restore.Phase), reqLogger) {
 		return true
 	}
27 changes: 18 additions & 9 deletions pkg/controller/distributedrediscluster/sync_handler.go
@@ -30,7 +30,7 @@ type syncContext struct {

 func (r *ReconcileDistributedRedisCluster) ensureCluster(ctx *syncContext) error {
 	cluster := ctx.cluster
-	if err := r.validate(cluster, ctx.reqLogger); err != nil {
+	if err := r.validateAndSetDefault(cluster, ctx.reqLogger); err != nil {
 		if k8sutil.IsRequestRetryable(err) {
 			return Kubernetes.Wrap(err, "Validate")
 		}
@@ -69,9 +69,9 @@ func (r *ReconcileDistributedRedisCluster) ensureCluster(ctx *syncContext) error
 	if err := r.ensurer.EnsureRedisSvc(cluster, labels); err != nil {
 		return Kubernetes.Wrap(err, "EnsureRedisSvc")
 	}
-	if err := r.ensurer.EnsureRedisOSMSecret(cluster, labels); err != nil {
+	if err := r.ensurer.EnsureRedisRCloneSecret(cluster, labels); err != nil {
 		if k8sutil.IsRequestRetryable(err) {
-			return Kubernetes.Wrap(err, "EnsureRedisOSMSecret")
+			return Kubernetes.Wrap(err, "EnsureRedisRCloneSecret")
 		}
 		return StopRetry.Wrap(err, "stop retry")
 	}
@@ -89,16 +89,22 @@ func (r *ReconcileDistributedRedisCluster) waitPodReady(ctx *syncContext) error
 	return nil
 }

-func (r *ReconcileDistributedRedisCluster) validate(cluster *redisv1alpha1.DistributedRedisCluster, reqLogger logr.Logger) error {
+func (r *ReconcileDistributedRedisCluster) validateAndSetDefault(cluster *redisv1alpha1.DistributedRedisCluster, reqLogger logr.Logger) error {
 	var update bool
 	var err error

-	if cluster.IsRestoreFromBackup() && !cluster.IsRestored() {
-		update, err = r.validateRestore(cluster, reqLogger)
+	if cluster.IsRestoreFromBackup() && cluster.ShouldInitRestorePhase() {
+		update, err = r.initRestore(cluster, reqLogger)
 		if err != nil {
 			return err
 		}
 	}
+
+	if cluster.IsRestoreFromBackup() && (cluster.IsRestoreRunning() || cluster.IsRestoreRestarting()) {
+		// Set ClusterReplicas = 0; only the master nodes are started in the
+		// first reconcile loop when doing a restore.
+		cluster.Spec.ClusterReplicas = 0
+	}
+
 	updateDefault := cluster.DefaultSpec(reqLogger)
 	if update || updateDefault {
 		return r.crController.UpdateCR(cluster)
@@ -116,7 +122,7 @@ func dbLoadedFromDiskWhenRestore(cluster *redisv1alpha1.DistributedRedisCluster,
 	}
 }

-func (r *ReconcileDistributedRedisCluster) validateRestore(cluster *redisv1alpha1.DistributedRedisCluster, reqLogger logr.Logger) (bool, error) {
+func (r *ReconcileDistributedRedisCluster) initRestore(cluster *redisv1alpha1.DistributedRedisCluster, reqLogger logr.Logger) (bool, error) {
 	update := false
 	if cluster.Status.Restore.Backup == nil {
 		initSpec := cluster.Spec.Init
@@ -130,6 +136,10 @@ func (r *ReconcileDistributedRedisCluster) validateRestore(cluster *redisv1alpha
 			return update, fmt.Errorf("backup is still running")
 		}
 		cluster.Status.Restore.Backup = backup
+		cluster.Status.Restore.Phase = redisv1alpha1.RestorePhaseRunning
+		if err := r.crController.UpdateCRStatus(cluster); err != nil {
+			return update, err
+		}
 	}
 	backup := cluster.Status.Restore.Backup
 	if cluster.Spec.Image == "" {
@@ -140,8 +150,7 @@ func (r *ReconcileDistributedRedisCluster) validateRestore(cluster *redisv1alpha
 		cluster.Spec.MasterSize = backup.Status.MasterSize
 		update = true
 	}
-	// Set ClusterReplicas = 0, only start master node in first reconcile loop when do restore
-	cluster.Spec.ClusterReplicas = 0
+
 	return update, nil
 }