Added flags to delete all dependent resources and configurations with… #437

Open · wants to merge 1 commit into base: master
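Based on the diff below, this pull request adds three flags to the 'civo kubernetes remove' command: '--delete-volumes' (also delete the cluster's dependent volumes), '--keep-firewalls' (do not delete the cluster's firewall), and '--keep-kubeconfig' (do not touch the local kubeconfig). For example, an invocation such as 'civo kubernetes remove my-cluster --delete-volumes --keep-firewalls' (cluster name illustrative) would remove the cluster and its dangling volumes while leaving the firewall in place.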
5 changes: 5 additions & 0 deletions cmd/kubernetes/kubernetes.go
@@ -142,6 +142,11 @@ func init() {
kubernetesNodePoolInstanceListCmd.Flags().StringVarP(&nodePoolID, "node-pool-id", "p", "", "the ID of the node pool.")

kubernetesNodePoolCmd.AddCommand(kubernetesNodePoolListCmd)

// Define the flags for the kubernetesRemoveCmd
kubernetesRemoveCmd.Flags().BoolVar(&deleteVolumes, "delete-volumes", false, "Delete dependent volumes")
kubernetesRemoveCmd.Flags().BoolVar(&keepFirewalls, "keep-firewalls", false, "Keep dependent firewalls")
kubernetesRemoveCmd.Flags().BoolVar(&keepKubeconfig, "keep-kubeconfig", false, "Keep kubeconfig")
Member:

I am not sure if we should be removing/managing kubeconfig context on behalf of the user. Not to mention that it isn't backwards compatible. I don't know why a user would want to keep a kubeconfig of a deleted cluster in the first place. cc: @fernando-villalba

Author:

So should I remove this flag?
@RealHarshThakur @fernando-villalba @uzaxirr

Contributor:

I don't know why a user would want to keep a kubeconfig of a deleted cluster in the first place

Exactly. Deleting the kubeconfig configuration automatically with a CLI tool when the cluster is deleted is extremely common; the gcp CLI does it, kind does it. It's extremely convenient and it just makes sense to do so.

}

func getKubernetesList(value string) []string {
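A middle ground for the kubeconfig discussion above would be to drop only the deleted cluster's entries from the kubeconfig instead of touching anything else in the file. Below is a minimal sketch of that idea, assuming client-go's clientcmd package and that the context, cluster, and user entries are keyed by the cluster name; both are assumptions for illustration, not how the civo CLI necessarily stores kubeconfig data:

package main

import (
	"k8s.io/client-go/tools/clientcmd"
)

// removeClusterFromKubeconfig removes only the entries belonging to the given
// cluster, leaving every other context in the kubeconfig untouched.
// Sketch only: the key names and error handling are assumptions.
func removeClusterFromKubeconfig(kubeconfigPath, clusterName string) error {
	cfg, err := clientcmd.LoadFromFile(kubeconfigPath)
	if err != nil {
		return err
	}
	delete(cfg.Contexts, clusterName)
	delete(cfg.Clusters, clusterName)
	delete(cfg.AuthInfos, clusterName)
	if cfg.CurrentContext == clusterName {
		cfg.CurrentContext = "" // avoid leaving a dangling current-context
	}
	return clientcmd.WriteToFile(*cfg, kubeconfigPath)
}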
127 changes: 98 additions & 29 deletions cmd/kubernetes/kubernetes_remove.go
@@ -14,7 +14,15 @@ import (
"github.com/spf13/cobra"
)

var kuberneteList []utility.ObjecteList
var (
deleteVolumes bool // Flag to delete dependent volumes
keepFirewalls bool // Flag to keep dependent firewalls
keepKubeconfig bool // Flag to keep kubeconfig
)

var kuberneteList []utility.ObjecteList // List to store the Kubernetes clusters to be deleted

// Command definition for removing a Kubernetes cluster
var kubernetesRemoveCmd = &cobra.Command{
Use: "remove",
Aliases: []string{"rm", "delete", "destroy"},
@@ -30,6 +38,7 @@ var kubernetesRemoveCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
utility.EnsureCurrentRegion()

// Create a client to interact with the Civo API
client, err := config.CivoAPIClient()
if common.RegionSet != "" {
client.Region = common.RegionSet
@@ -39,11 +48,12 @@ var kubernetesRemoveCmd = &cobra.Command{
os.Exit(1)
}

if len(args) == 1 {
kubernetesCluster, err := client.FindKubernetesCluster(args[0])
// Find and store the Kubernetes clusters to be deleted
for _, clusterName := range args {
kubernetesCluster, err := client.FindKubernetesCluster(clusterName)
if err != nil {
if errors.Is(err, civogo.ZeroMatchesError) {
utility.Error("sorry there is no %s Kubernetes cluster in your account", utility.Red(args[0]))
utility.Error("sorry there is no %s Kubernetes cluster in your account", utility.Red(clusterName))
os.Exit(1)
}
if errors.Is(err, civogo.MultipleMatchesError) {
@@ -52,53 +62,112 @@ var kubernetesRemoveCmd = &cobra.Command{
}
}
kuberneteList = append(kuberneteList, utility.ObjecteList{ID: kubernetesCluster.ID, Name: kubernetesCluster.Name})
} else {
for _, v := range args {
kubernetesCluster, err := client.FindKubernetesCluster(v)
if err == nil {
kuberneteList = append(kuberneteList, utility.ObjecteList{ID: kubernetesCluster.ID, Name: kubernetesCluster.Name})
}
}
}

// Collect names of the clusters to be deleted for confirmation message
kubernetesNameList := []string{}
for _, v := range kuberneteList {
kubernetesNameList = append(kubernetesNameList, v.Name)
}

var volsNameList []string
for _, v := range kuberneteList {
vols, err := client.ListVolumesForCluster(v.ID)
if err != nil {
utility.Error("error getting the list of dangling volumes: %s", err)
os.Exit(1)
}
for _, v := range vols {
volsNameList = append(volsNameList, v.Name)
}
if vols != nil {
utility.YellowConfirm("There are volume(s) attached to this cluster. Consider deleting or detaching these before deleting the cluster:\n%s\n", utility.Green(strings.Join(volsNameList, "\n")))
}
}

// Confirm the deletion with the user
if utility.UserConfirmedDeletion(fmt.Sprintf("Kubernetes %s", pluralize.Pluralize(len(kuberneteList), "cluster")), common.DefaultYes, strings.Join(kubernetesNameList, ", ")) {

for _, v := range kuberneteList {
_, err = client.DeleteKubernetesCluster(v.ID)

// Delete the Kubernetes cluster
_, err := client.DeleteKubernetesCluster(v.ID)
if err != nil {
utility.Error("error deleting the kubernetes cluster: %s", err)
os.Exit(1)
}

/* Poll for the deletion status
This is required because, if we try to delete other things like firewalls before
the cluster is completely deleted, we will encounter errors like "database_firewall_inuse_by_cluster".
*/
for {
_, err := client.FindKubernetesCluster(v.Name)
if err != nil {
if errors.Is(err, civogo.ZeroMatchesError) {
break // Cluster is deleted
}
}
}

Member:

we should fix this on the API side rather than the client side so the clients don't have to poll like this. cc: @uzaxirr

Author:

Please correct me if I am wrong @RealHarshThakur @uzaxirr @fernando-villalba

I believe the current API logic is appropriate. The API call should end after triggering the deletion of the cluster, allowing the client-side polling mechanism to take over. This approach ensures that the API call is quick and responsive. Typically, the polling mechanism requires only 1-2 attempts with a good internet connection and up to 3 attempts with a weaker connection to confirm the deletion status.

If we move the polling logic to the API side, the API call might take longer to complete and could potentially result in timeouts or errors, especially under poor network conditions. This could negatively impact user experience and increase the complexity of the API.

I look forward to your thoughts and feedback. Please let me know what I can improve.
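For illustration only, the client-side polling this thread debates could also be throttled and bounded by a timeout. A minimal sketch under the assumption that the same civogo client and error types are used; the helper name, interval, and timeout are made up for this example and are not part of the PR:

package main

import (
	"errors"
	"time"

	"github.com/civo/civogo"
)

// waitForClusterDeletion polls until the named cluster can no longer be found,
// or gives up once the timeout elapses. Sketch only: the 5-second interval and
// the function itself are assumptions, not code from this PR.
func waitForClusterDeletion(client *civogo.Client, name string, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if _, err := client.FindKubernetesCluster(name); err != nil && errors.Is(err, civogo.ZeroMatchesError) {
			return true // cluster is gone
		}
		time.Sleep(5 * time.Second)
	}
	return false
}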

// Delete volumes if --delete-volumes flag is set
if deleteVolumes {
volumes, err := client.ListVolumesForCluster(v.ID)
if err != nil {
if !errors.Is(err, civogo.ZeroMatchesError) {
utility.Error("Error listing volumes for cluster %s: %s", v.Name, err)
}
} else {
for _, volume := range volumes {
_, err := client.DeleteVolume(volume.ID)
if err != nil {
utility.Error("Error deleting volume %s: %s", volume.Name, err)
}
fmt.Printf("%s volume deleted", volume.ID)
}
}
}

// Output volumes left behind if deleteVolumes flag is not set
if !deleteVolumes {
volumes, err := client.ListVolumesForCluster(v.ID)
if err != nil {
if !errors.Is(err, civogo.ZeroMatchesError) {
utility.Error("Error listing volumes for cluster %s: %s", v.Name, err)
}
} else if len(volumes) > 0 {
fmt.Fprintf(os.Stderr, "Volumes left behind for Kubernetes cluster %s:\n", v.Name)
for _, volume := range volumes {
fmt.Fprintf(os.Stderr, "- %s\n", volume.ID)
}
fmt.Fprintf(os.Stderr, "Consider using '--delete-volumes' flag next time to delete them automatically.\n")
}
}

// Delete firewalls if --keep-firewalls flag is not set
if !keepFirewalls {
firewall, err := client.FindFirewall(v.Name)
if err != nil {
if errors.Is(err, civogo.MultipleMatchesError) {
utility.Error("Error deleting the firewall: %v. Please delete the firewall manually by visiting: https://dashboard.civo.com/firewalls", err)
} else {
utility.Error("Error finding the firewall: %v", err)
}
} else if firewall.ClusterCount == 0 {
_, err := client.DeleteFirewall(firewall.ID)
if err != nil {
utility.Error("Error deleting firewall %s: %v", firewall.Name, err)
} else {
fmt.Fprintf(os.Stderr, "Firewall %s deleted. If you want to keep the firewall in the future, please use '--keep-firewalls' flag.\n", firewall.ID)
}
}
}

// Delete kubeconfig if --keep-kubeconfig flag is not set
if !keepKubeconfig {
kubeconfigPath := fmt.Sprintf("%s/.kube/config", os.Getenv("HOME"))
err := os.Remove(kubeconfigPath)
if err != nil && !os.IsNotExist(err) {
utility.Error("Error deleting kubeconfig: %s", err)
} else {
fmt.Fprintf(os.Stderr, "Kubeconfig file deleted. If you want to keep the kubeconfig in the future, please use '--keep-kubeconfig' flag.\n")
}
}

Review comment:

You should not delete the kubeconfig file, because it may contain other clusters' data, even from different cloud providers.
}

// Output the result of the deletion
ow := utility.NewOutputWriter()

for _, v := range kuberneteList {
ow.StartLine()
ow.AppendDataWithLabel("id", v.ID, "ID")
ow.AppendDataWithLabel("name", v.Name, "Name")
}

// Format the output based on the selected format
switch common.OutputFormat {
case "json":
if len(kuberneteList) == 1 {