diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..f6d74154 --- /dev/null +++ b/.gitignore @@ -0,0 +1,23 @@ +# Ignore all +* + +# Unignore all with extensions +!*.* + +# Unignore all dirs +!*/ + +# Ignore terraform plan, variable, backup and state files +terraform.tfplan +terraform.tfstate +terraform.tfvars +*.backup +*.tfstate.* + +# Ignore terraform hidden folder +/.terraform + +# General +.DS_Store +/.idea +/.vscode diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 00000000..f982f589 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,26 @@ +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + +linters: + disable-all: true + enable: + - durationcheck + - errcheck + - exportloopref + - forcetypeassert + - godot + - gofmt + - gosimple + - gosec + - ineffassign + - makezero + - misspell + - nilerr + - predeclared + - staticcheck + - tenv + - unconvert + - unparam + - unused + - vet diff --git a/README.md b/README.md index dc0cdb5e..5048fc22 100644 --- a/README.md +++ b/README.md @@ -1 +1,208 @@ -# Capella Terraform Provider +# Terraform Provider Capella + +This is the repository for Couchbase's Terraform-Provider-Capella which forms a Terraform plugin for use with Couchbase Capella. + +## Requirements + +- [Terraform](https://www.terraform.io/downloads.html) >= 1.0 +- [Go](https://golang.org/doc/install) >= 1.20 + +## Using the provider + +### Prepare Terraform for local provider install + +Terraform installs providers and verifies their versions and checksums when you run `terraform init`. Terraform will download your +providers from either the provider registry or a local registry. However, while building your provider you will want to +test a Terraform configuration against a local development build of the provider. The development build will not have an associated +version number or an official set of checksums listed in a provider registry.
+ +Terraform allows you to use local provider builds by setting a dev_overrides block in a configuration file called .terraformrc. +This block overrides all other configured installation methods. + +Terraform searches for the .terraformrc file in your home directory and applies any configuration settings you set. + +#### Create the terraform configuration file +Create a new file called .terraformrc in your home directory (~), then add the dev_overrides block below. + +```shell +provider_installation { + +dev_overrides { +"hashicorp.com/couchabasecloud/capella" = "<PATH>" +} + +# For all other providers, install them directly from their origin provider +# registries as normal. If you omit this, Terraform will _only_ use +# the dev_overrides block, and so no other providers will be available. +direct {} +} +``` + +`<PATH>` should be replaced with the directory corresponding to the terraform provider binary. +This could be the default folder where Go installs your binaries, which can be determined by calling: + +```shell +go env GOBIN +``` + +Alternatively, it may be the default path which is of the form: +```shell +/Users/<username>/go/bin +``` + +#### Build and install the executable + +Now build the terraform provider. Ensure you specify the build location using `<PATH>` as described above. + +`go build -o <PATH>` + +### Authentication + +In order to set up authentication with the Couchbase Capella provider a V4 API key must be generated. + +To find out how to generate a V4 API Key, please see the following document: +https://docs.couchbase.com/cloud/management-api-guide/management-api-start.html + +### Terraform Environment Variables + +Environment variables can be set for terraform by creating and adding a terraform.template.tfvars file: +```terraform +auth_token = "<v4-api-key-secret>" +organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" +host = "https://cloudapi.cloud.couchbase.com" +``` + +A variables.tf should also be added to define the variables for terraform.
+```terraform +variable "host" { + description = "The Host URL of Couchbase Cloud." +} + +variable "organization_id" { + description = "Capella Organization ID" +} + +variable "auth_token" { + description = "Authentication API Key" +} +``` + +Set the environment variables by using the following notation: +```terraform +resource "capella_project" "example" { + organization_id = var.organization_id + name = var.project_name + description = "A Capella Project that will host many Capella clusters." +} +``` + +Alternatively, if you would like to set environment variables locally on your system (as opposed to using terraform.template.tfvars), +preface them with `TF_VAR_`. Terraform will then apply them when you run +`terraform apply`. For example: +```bash +export TF_VAR_auth_token=<v4-api-key-secret> +export TF_VAR_organization_id=<organization-id> +export TF_VAR_host="https://cloudapi.cloud.couchbase.com" +``` + +### Create and manage resources using terraform + +#### Example Usage + +Note: You will need to provide both the url of the capella host as well as your V4 API secret for authentication. + +```terraform +terraform { + required_providers { + capella = { + source = "hashicorp.com/couchabasecloud/capella" + } + } +} + +provider "capella" { + host = "the host url of couchbase cloud" + authentication_token = "capella authentication token" +} + + +resource "capella_project" "example" { + organization_id = "ffffffff-aaaa-1414-eeee-000000000000" + name = "example-name" + description = "example-description" +} + +output "example_project" { + value = capella_project.example +} +``` + +This repository contains a number of example directories containing examples of HashiCorp Configuration Language (HCL) code +being used to create and manage Capella resources. To try these examples out for yourself, change into one of them and run +the below commands. + +#### Commands + +#### n.b. 
Terraform Init + +Ordinarily, terraform will download the requested providers on running the command: +```bash +$ terraform init +``` +If you are working with a local install of the `Terraform-Provider-Capella` provider, this step is not needed and considered optional. +However if you plan to use any other providers at the same time it may need to be run. + +**1\. Review the Terraform plan** + +Execute the following command to automatically review and update the formatting of .tf files. +```bash +$ terraform fmt +``` + +Execute the following command to review the resources that will be deployed. + +```bash +$ terraform plan -var-file=terraform.template.tfvars +``` +NOTE: If using a terraform.template.tfvars file to specify variables, then the -var-file flag will need to be used. +If instead, variables are set either using a terraform.tfvars file or by using TF_VAR_ prefaced environment variables, +then the -var-file flag can be omitted. This also applies for `terraform apply`. + +**2\. Execute the Terraform apply** + +Execute the plan to deploy the Couchbase Capella resources. + +```bash +$ terraform apply -var-file=terraform.template.tfvars +``` + +**3\. Destroy the resources** + +Execute the following command to destroy all the resources. + +```bash +$ terraform destroy +``` + +To destroy a specific resource + +```bash +$ terraform destroy -target=RESOURCE_ADDRESS +``` +Example + +```bash +$ terraform destroy -target=capella_project.example +``` + +**4\. To refresh the state file to sync with the remote** + +```bash +$ terraform apply --refresh-only +``` + +**5\.
To import remote resource** + +```bash +$ terraform import RESOURCE_TYPE.NAME RESOURCE_IDENTIFIER +``` diff --git a/docs/data-sources/allowlist.md b/docs/data-sources/allowlist.md new file mode 100644 index 00000000..ad044f6c --- /dev/null +++ b/docs/data-sources/allowlist.md @@ -0,0 +1,55 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "capella_allowlist Data Source - terraform-provider-capella" +subcategory: "" +description: |- + +--- + +# capella_allowlist (Data Source) + + + + + + +## Schema + +### Required + +- `cluster_id` (String) +- `organization_id` (String) +- `project_id` (String) + +### Read-Only + +- `data` (Attributes List) (see [below for nested schema](#nestedatt--data)) + + +### Nested Schema for `data` + +Optional: + +- `if_match` (String) + +Read-Only: + +- `audit` (Attributes) (see [below for nested schema](#nestedatt--data--audit)) +- `cidr` (String) +- `cluster_id` (String) +- `comment` (String) +- `expires_at` (String) +- `id` (String) +- `organization_id` (String) +- `project_id` (String) + + +### Nested Schema for `data.audit` + +Read-Only: + +- `created_at` (String) +- `created_by` (String) +- `modified_at` (String) +- `modified_by` (String) +- `version` (Number) diff --git a/docs/data-sources/allowlists.md b/docs/data-sources/allowlists.md new file mode 100644 index 00000000..d6b1f3b6 --- /dev/null +++ b/docs/data-sources/allowlists.md @@ -0,0 +1,55 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "capella_allowlists Data Source - terraform-provider-capella" +subcategory: "" +description: |- + +--- + +# capella_allowlists (Data Source) + + + + + + +## Schema + +### Required + +- `cluster_id` (String) +- `organization_id` (String) +- `project_id` (String) + +### Read-Only + +- `data` (Attributes List) (see [below for nested schema](#nestedatt--data)) + + +### Nested Schema for `data` + +Optional: + +- `if_match` (String) + +Read-Only: + +- `audit` (Attributes) 
(see [below for nested schema](#nestedatt--data--audit)) +- `cidr` (String) +- `cluster_id` (String) +- `comment` (String) +- `expires_at` (String) +- `id` (String) +- `organization_id` (String) +- `project_id` (String) + + +### Nested Schema for `data.audit` + +Read-Only: + +- `created_at` (String) +- `created_by` (String) +- `modified_at` (String) +- `modified_by` (String) +- `version` (Number) diff --git a/docs/data-sources/projects.md b/docs/data-sources/projects.md new file mode 100644 index 00000000..08f0a315 --- /dev/null +++ b/docs/data-sources/projects.md @@ -0,0 +1,51 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "capella_projects Data Source - terraform-provider-capella" +subcategory: "" +description: |- + +--- + +# capella_projects (Data Source) + + + + + + +## Schema + +### Required + +- `organization_id` (String) + +### Read-Only + +- `data` (Attributes List) (see [below for nested schema](#nestedatt--data)) + + +### Nested Schema for `data` + +Optional: + +- `if_match` (String) + +Read-Only: + +- `audit` (Attributes) (see [below for nested schema](#nestedatt--data--audit)) +- `description` (String) +- `etag` (String) +- `id` (String) +- `name` (String) +- `organization_id` (String) + + +### Nested Schema for `data.audit` + +Read-Only: + +- `created_at` (String) +- `created_by` (String) +- `modified_at` (String) +- `modified_by` (String) +- `version` (Number) diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..b215bb63 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,21 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "capella Provider" +subcategory: "" +description: |- + +--- + +# capella Provider + + + + + + +## Schema + +### Required + +- `authentication_token` (String, Sensitive) Capella API Token that serves as an authentication mechanism. 
+- `host` (String) Capella Public API HTTPS Host URL diff --git a/docs/resources/Cluster.md b/docs/resources/Cluster.md new file mode 100644 index 00000000..9f075261 --- /dev/null +++ b/docs/resources/Cluster.md @@ -0,0 +1,127 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "capella_cluster Resource - terraform-provider-capella" +subcategory: "" +description: |- + +--- + +# capella_cluster (Resource) + + + + + + +## Schema + +### Required + +- `availability` (Attributes) (see [below for nested schema](#nestedatt--availability)) +- `cloud_provider` (Attributes) (see [below for nested schema](#nestedatt--cloud_provider)) +- `name` (String) +- `organization_id` (String) +- `project_id` (String) +- `service_groups` (Attributes List) (see [below for nested schema](#nestedatt--service_groups)) +- `support` (Attributes) (see [below for nested schema](#nestedatt--support)) + +### Optional + +- `app_service_id` (String) +- `couchbase_server` (Attributes) (see [below for nested schema](#nestedatt--couchbase_server)) +- `description` (String) +- `if_match` (String) + +### Read-Only + +- `audit` (Attributes) (see [below for nested schema](#nestedatt--audit)) +- `current_state` (String) +- `etag` (String) +- `id` (String) The ID of this resource. 
+ + +### Nested Schema for `availability` + +Required: + +- `type` (String) + + + +### Nested Schema for `cloud_provider` + +Required: + +- `cidr` (String) +- `region` (String) +- `type` (String) + + + +### Nested Schema for `service_groups` + +Required: + +- `node` (Attributes) (see [below for nested schema](#nestedatt--service_groups--node)) +- `num_of_nodes` (Number) +- `services` (List of String) + + +### Nested Schema for `service_groups.node` + +Required: + +- `compute` (Attributes) (see [below for nested schema](#nestedatt--service_groups--node--compute)) +- `disk` (Attributes) (see [below for nested schema](#nestedatt--service_groups--node--disk)) + + +### Nested Schema for `service_groups.node.compute` + +Required: + +- `cpu` (Number) +- `ram` (Number) + + + +### Nested Schema for `service_groups.node.disk` + +Required: + +- `type` (String) + +Optional: + +- `iops` (Number) +- `storage` (Number) + + + + + +### Nested Schema for `support` + +Required: + +- `plan` (String) +- `timezone` (String) + + + +### Nested Schema for `couchbase_server` + +Optional: + +- `version` (String) + + + +### Nested Schema for `audit` + +Read-Only: + +- `created_at` (String) +- `created_by` (String) +- `modified_at` (String) +- `modified_by` (String) +- `version` (Number) diff --git a/docs/resources/allowlist.md b/docs/resources/allowlist.md new file mode 100644 index 00000000..cc72bad9 --- /dev/null +++ b/docs/resources/allowlist.md @@ -0,0 +1,45 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "capella_allowlist Resource - terraform-provider-capella" +subcategory: "" +description: |- + +--- + +# capella_allowlist (Resource) + + + + + + +## Schema + +### Required + +- `cidr` (String) +- `cluster_id` (String) +- `organization_id` (String) +- `project_id` (String) + +### Optional + +- `comment` (String) +- `expires_at` (String) +- `if_match` (String) + +### Read-Only + +- `audit` (Attributes) (see [below for nested 
schema](#nestedatt--audit)) +- `id` (String) The ID of this resource. + + +### Nested Schema for `audit` + +Read-Only: + +- `created_at` (String) +- `created_by` (String) +- `modified_at` (String) +- `modified_by` (String) +- `version` (Number) diff --git a/docs/resources/apikey.md b/docs/resources/apikey.md new file mode 100644 index 00000000..eb7bc17a --- /dev/null +++ b/docs/resources/apikey.md @@ -0,0 +1,59 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "capella_apikey Resource - terraform-provider-capella" +subcategory: "" +description: |- + +--- + +# capella_apikey (Resource) + + + + + + +## Schema + +### Required + +- `name` (String) +- `organization_id` (String) +- `organization_roles` (List of String) + +### Optional + +- `allowed_cidrs` (List of String) +- `description` (String) +- `expiry` (Number) +- `resources` (Attributes List) (see [below for nested schema](#nestedatt--resources)) + +### Read-Only + +- `audit` (Attributes) (see [below for nested schema](#nestedatt--audit)) +- `id` (String) The ID of this resource. 
+- `token` (String, Sensitive) + + +### Nested Schema for `resources` + +Required: + +- `id` (String) +- `roles` (List of String) + +Optional: + +- `type` (String) + + + +### Nested Schema for `audit` + +Read-Only: + +- `created_at` (String) +- `created_by` (String) +- `modified_at` (String) +- `modified_by` (String) +- `version` (Number) diff --git a/docs/resources/database_credential.md b/docs/resources/database_credential.md new file mode 100644 index 00000000..d34b3b2e --- /dev/null +++ b/docs/resources/database_credential.md @@ -0,0 +1,88 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "capella_database_credential Resource - terraform-provider-capella" +subcategory: "" +description: |- + +--- + +# capella_database_credential (Resource) + + + + + + +## Schema + +### Required + +- `cluster_id` (String) +- `name` (String) +- `organization_id` (String) +- `project_id` (String) + +### Optional + +- `access` (Attributes List) (see [below for nested schema](#nestedatt--access)) +- `password` (String, Sensitive) + +### Read-Only + +- `audit` (Attributes) (see [below for nested schema](#nestedatt--audit)) +- `id` (String) The ID of this resource. 
+ + +### Nested Schema for `access` + +Required: + +- `privileges` (List of String) + +Optional: + +- `resources` (Attributes) (see [below for nested schema](#nestedatt--access--resources)) + + +### Nested Schema for `access.resources` + +Optional: + +- `buckets` (Attributes List) (see [below for nested schema](#nestedatt--access--resources--buckets)) + + +### Nested Schema for `access.resources.buckets` + +Required: + +- `name` (String) + +Optional: + +- `scopes` (Attributes List) (see [below for nested schema](#nestedatt--access--resources--buckets--scopes)) + + +### Nested Schema for `access.resources.buckets.scopes` + +Required: + +- `name` (String) + +Optional: + +- `collections` (List of String) + + + + + + +### Nested Schema for `audit` + +Read-Only: + +- `created_at` (String) +- `created_by` (String) +- `modified_at` (String) +- `modified_by` (String) +- `version` (Number) diff --git a/docs/resources/project.md b/docs/resources/project.md new file mode 100644 index 00000000..8763070c --- /dev/null +++ b/docs/resources/project.md @@ -0,0 +1,43 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "capella_project Resource - terraform-provider-capella" +subcategory: "" +description: |- + +--- + +# capella_project (Resource) + + + + + + +## Schema + +### Required + +- `name` (String) +- `organization_id` (String) + +### Optional + +- `description` (String) +- `if_match` (String) + +### Read-Only + +- `audit` (Attributes) (see [below for nested schema](#nestedatt--audit)) +- `etag` (String) +- `id` (String) The ID of this resource. 
+ + +### Nested Schema for `audit` + +Read-Only: + +- `created_at` (String) +- `created_by` (String) +- `modified_at` (String) +- `modified_by` (String) +- `version` (Number) diff --git a/docs/resources/user.md b/docs/resources/user.md new file mode 100644 index 00000000..4d9f7049 --- /dev/null +++ b/docs/resources/user.md @@ -0,0 +1,65 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "capella_user Resource - terraform-provider-capella" +subcategory: "" +description: |- + +--- + +# capella_user (Resource) + + + + + + +## Schema + +### Required + +- `email` (String) +- `organization_id` (String) +- `organization_roles` (List of String) + +### Optional + +- `if_match` (String) +- `name` (String) +- `resources` (Attributes List) (see [below for nested schema](#nestedatt--resources)) + +### Read-Only + +- `audit` (Attributes) (see [below for nested schema](#nestedatt--audit)) +- `enable_notifications` (Boolean) +- `etag` (String) +- `expires_at` (String) +- `id` (String) The ID of this resource. +- `inactive` (Boolean) +- `last_login` (String) +- `region` (String) +- `status` (String) +- `time_zone` (String) + + +### Nested Schema for `resources` + +Required: + +- `id` (String) +- `roles` (List of String) + +Optional: + +- `type` (String) + + + +### Nested Schema for `audit` + +Read-Only: + +- `created_at` (String) +- `created_by` (String) +- `modified_at` (String) +- `modified_by` (String) +- `version` (Number) diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 00000000..03613400 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,4 @@ +# Examples + +This directory contains examples that are mostly used for documentation, but can also be run/tested manually via the Terraform CLI. 
+ diff --git a/examples/allowlist/README.md b/examples/allowlist/README.md new file mode 100644 index 00000000..05cde342 --- /dev/null +++ b/examples/allowlist/README.md @@ -0,0 +1,159 @@ +# Capella AllowList Example + +This example shows how to create and manage AllowLists in Capella. + +This creates a new allowlist in the selected Capella cluster. It uses the organization ID, projectId and clusterId to do so. + +To run, configure your Couchbase Capella provider as described in README in the root of this project. + +# Example Walkthrough + +In this example, we are going to do the following. + +1. Create a new allowlist entry in an existing Capella cluster as stated in the `create_allowlist.tf` file. + +### View the plan for the resources that Terraform will create + +Command: `terraform plan` + +Sample Output: +``` +$ terraform plan +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/talina.shrotriya/workspace/terraform-provider-capella +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # capella_allowlist.new_allowlist will be created + + resource "capella_allowlist" "new_allowlist" { + + audit = { + + created_at = (known after apply) + + created_by = (known after apply) + + modified_at = (known after apply) + + modified_by = (known after apply) + + version = (known after apply) + } + + cidr = "10.0.0.0/16" + + cluster_id = "f3818c88-3016-4c01-b3db-233173d8e4fd" + + comment = "Allow access from any ip address" + + expires_at = "2023-11-14T21:49:58.465Z" + + id = (known after apply) + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + + project_id = "e912ed02-8ac4-403c-a0c5-67c57284a5a4" + } + +Plan: 1 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + example_allowlist = { + + audit = (known after apply) + + cidr = "10.0.0.0/16" + + cluster_id = "f3818c88-3016-4c01-b3db-233173d8e4fd" + + comment = "Allow access from any ip address" + + expires_at = "2023-11-14T21:49:58.465Z" + + id = (known after apply) + + if_match = null + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + + project_id = "e912ed02-8ac4-403c-a0c5-67c57284a5a4" + } + +─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── + +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now. 
+``` + +### Apply the Plan, in order to create a new Allowlist entry + +Command: `terraform apply` + +Sample Output: +``` +$ terraform apply +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/talina.shrotriya/workspace/terraform-provider-capella +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # capella_allowlist.new_allowlist will be created + + resource "capella_allowlist" "new_allowlist" { + + audit = { + + created_at = (known after apply) + + created_by = (known after apply) + + modified_at = (known after apply) + + modified_by = (known after apply) + + version = (known after apply) + } + + cidr = "10.0.0.0/16" + + cluster_id = "f3818c88-3016-4c01-b3db-233173d8e4fd" + + comment = "Allow access from another VPC" + + expires_at = "2023-11-14T21:49:58.465Z" + + id = (known after apply) + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + + project_id = "e912ed02-8ac4-403c-a0c5-67c57284a5a4" + } + +Plan: 1 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + new_allowlist = { + + audit = (known after apply) + + cidr = "10.0.0.0/16" + + cluster_id = "f3818c88-3016-4c01-b3db-233173d8e4fd" + + comment = "Allow access from another VPC" + + expires_at = "2023-11-14T21:49:58.465Z" + + id = (known after apply) + + if_match = null + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + + project_id = "e912ed02-8ac4-403c-a0c5-67c57284a5a4" + } + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. 
+ + Enter a value: yes + +capella_allowlist.new_allowlist: Creating... +capella_allowlist.new_allowlist: Creation complete after 1s [id=08b1221f-33cf-42cd-a4d5-a35f6aa0763e] + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +Outputs: + +new_allowlist = { + "audit" = { + "created_at" = "2023-09-19 21:57:04.032017652 +0000 UTC" + "created_by" = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + "modified_at" = "2023-09-19 21:57:04.032017652 +0000 UTC" + "modified_by" = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + "version" = 1 + } + "cidr" = "10.0.0.0/16" + "cluster_id" = "f3818c88-3016-4c01-b3db-233173d8e4fd" + "comment" = "Allow access from another VPC" + "expires_at" = "2023-11-14T21:49:58.465Z" + "id" = "08b1221f-33cf-42cd-a4d5-a35f6aa0763e" + "if_match" = tostring(null) + "organization_id" = "bdb8662c-7157-46ea-956f-ed86f4c75211" + "project_id" = "e912ed02-8ac4-403c-a0c5-67c57284a5a4" +} +``` + diff --git a/examples/allowlist/create_allowlist.tf b/examples/allowlist/create_allowlist.tf new file mode 100644 index 00000000..49b654c1 --- /dev/null +++ b/examples/allowlist/create_allowlist.tf @@ -0,0 +1,12 @@ +output "new_allowlist" { + value = capella_allowlist.new_allowlist +} + +resource "capella_allowlist" "new_allowlist" { + organization_id = var.organization_id + project_id = var.project_id + cluster_id = var.cluster_id + cidr = "10.0.0.0/16" + comment = "Allow access from another VPC" + expires_at = "2023-11-14T21:49:58.465Z" +} diff --git a/examples/allowlist/list_allowlists.tf b/examples/allowlist/list_allowlists.tf new file mode 100644 index 00000000..3f09c6fa --- /dev/null +++ b/examples/allowlist/list_allowlists.tf @@ -0,0 +1,9 @@ +output "projects_list" { + value = data.capella_allowlist.existing_allowlists +} + +data "capella_allowlist" "existing_allowlists" { + organization_id = var.organization_id + project_id = var.project_id + cluster_id = var.cluster_id +} diff --git a/examples/allowlist/main.tf b/examples/allowlist/main.tf new file mode 100644 index 
00000000..0ad7da72 --- /dev/null +++ b/examples/allowlist/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + capella = { + source = "hashicorp.com/couchabasecloud/capella" + } + } +} + +provider "capella" { + host = var.host + authentication_token = var.auth_token +} diff --git a/examples/allowlist/terraform.template.tfvars b/examples/allowlist/terraform.template.tfvars new file mode 100644 index 00000000..d1671cfe --- /dev/null +++ b/examples/allowlist/terraform.template.tfvars @@ -0,0 +1,5 @@ +auth_token = "v4-api-key-secret" +organization_id = "" +project_id = "" +cluster_id = "" +host = "https://cloudapi.cloud.couchbase.com" diff --git a/examples/allowlist/variables.tf b/examples/allowlist/variables.tf new file mode 100644 index 00000000..c7b76e60 --- /dev/null +++ b/examples/allowlist/variables.tf @@ -0,0 +1,20 @@ +variable "host" { + description = "The Host URL of Couchbase Cloud." +} + +variable "organization_id" { + description = "Capella Organization ID" +} + +variable "project_id" { + description = "Capella Project ID" +} + +variable "cluster_id" { + description = "Capella Cluster ID" +} + +variable "auth_token" { + description = "Authentication API Key" + sensitive = true +} diff --git a/examples/apikey/README.md b/examples/apikey/README.md new file mode 100644 index 00000000..959dc838 --- /dev/null +++ b/examples/apikey/README.md @@ -0,0 +1,192 @@ +# Capella Api Key Example + +This example shows how to create and manage Api Key in Capella. + +This creates a new api key in the organization. It uses the organization ID to do so. + +To run, configure your Couchbase Capella provider as described in README in the root of this project. + +# Example Walkthrough + +In this example, we are going to do the following. +1. Create a new api key with the specified configuration. 
+ + +### View the plan for the resources that Terraform will create + +Command: `terraform plan` + +Sample Output: +``` +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/aniketkumar/.gvm/pkgsets/go1.19/global/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # capella_apikey.new_apikey will be created + + resource "capella_apikey" "new_apikey" { + + allowed_cidrs = [ + + "10.1.42.0/23", + + "10.1.42.0/23", + ] + + audit = (known after apply) + + description = (known after apply) + + expiry = (known after apply) + + id = (known after apply) + + name = "New Terraform Api Key" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + + organization_roles = [ + + "organizationMember", + ] + + resources = [ + + { + + id = (known after apply) + + roles = [ + + "projectManager", + + "projectDataReader", + ] + + type = "project" + }, + ] + + token = (sensitive value) + } + + # capella_project.existing_project will be created + + resource "capella_project" "existing_project" { + + audit = (known after apply) + + description = "A Capella Project that will host many Capella clusters." + + etag = (known after apply) + + id = (known after apply) + + name = "terraform-couchbasecapella-project" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + } + +Plan: 2 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + existing_project = { + + description = "A Capella Project that will host many Capella clusters." 
+ + if_match = null + + name = "terraform-couchbasecapella-project" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + } + + new_apikey = (sensitive value) + +─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── + +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now. + +``` + +### Apply the Plan, in order to create a new Api Key in Capella + +Command: `terraform apply` + +Sample Output: +``` +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/aniketkumar/.gvm/pkgsets/go1.19/global/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # capella_apikey.new_apikey will be created + + resource "capella_apikey" "new_apikey" { + + allowed_cidrs = [ + + "10.1.42.0/23", + + "10.1.42.0/23", + ] + + audit = (known after apply) + + description = (known after apply) + + expiry = (known after apply) + + id = (known after apply) + + name = "New Terraform Api Key" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + + organization_roles = [ + + "organizationMember", + ] + + resources = [ + + { + + id = (known after apply) + + roles = [ + + "projectManager", + + "projectDataReader", + ] + + type = "project" + }, + ] + + token = (sensitive value) + } + + # capella_project.existing_project will be created + + resource "capella_project" "existing_project" { + + audit = (known after apply) + + description = "A Capella Project that will host many Capella clusters." + + etag = (known after apply) + + id = (known after apply) + + name = "terraform-couchbasecapella-project" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + } + +Plan: 2 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + existing_project = { + + description = "A Capella Project that will host many Capella clusters." + + if_match = null + + name = "terraform-couchbasecapella-project" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + } + + new_apikey = (sensitive value) + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +capella_project.existing_project: Creating... +capella_project.existing_project: Creation complete after 1s [id=9f117f7d-f333-4f56-8390-6895819fccd5] +capella_apikey.new_apikey: Creating... +capella_apikey.new_apikey: Creation complete after 1s [id=fkqycwTdfWwO1torAKXi3nyNAmA1jPDJ] + +Apply complete! Resources: 2 added, 0 changed, 0 destroyed. 
+ +Outputs: + +existing_project = { + "audit" = { + "created_at" = "2023-09-29 14:04:35.085325792 +0000 UTC" + "created_by" = "IRLp8qQwHiF4Ni3IIblH0nPBa4ox0p8I" + "modified_at" = "2023-09-29 14:04:35.085338407 +0000 UTC" + "modified_by" = "IRLp8qQwHiF4Ni3IIblH0nPBa4ox0p8I" + "version" = 1 + } + "description" = "A Capella Project that will host many Capella clusters." + "etag" = "Version: 1" + "id" = "9f117f7d-f333-4f56-8390-6895819fccd5" + "if_match" = tostring(null) + "name" = "terraform-couchbasecapella-project" + "organization_id" = "6af08c0a-8cab-4c1c-b257-b521575c16d0" +} +new_apikey = + +``` + diff --git a/examples/apikey/create_apikey.tf b/examples/apikey/create_apikey.tf new file mode 100644 index 00000000..9dad6a66 --- /dev/null +++ b/examples/apikey/create_apikey.tf @@ -0,0 +1,30 @@ +output "existing_project" { + value = capella_project.existing_project +} + +output "new_apikey" { + value = capella_apikey.new_apikey + sensitive = true +} + +resource "capella_project" "existing_project" { + organization_id = var.organization_id + name = var.project_name + description = "A Capella Project that will host many Capella clusters." 
+} + +resource "capella_apikey" "new_apikey" { + organization_id = var.organization_id + name = var.apikey.name + organization_roles = var.apikey.organization_roles + allowed_cidrs = var.apikey.allowed_cidrs + expiry = var.apikey.expiry + resources = [ + { + id = capella_project.existing_project.id + roles = var.resource.roles + type = var.resource.type + } + ] +} + diff --git a/examples/apikey/main.tf b/examples/apikey/main.tf new file mode 100644 index 00000000..9b86e51a --- /dev/null +++ b/examples/apikey/main.tf @@ -0,0 +1,13 @@ +terraform { + required_providers { + capella = { + source = "hashicorp.com/couchabasecloud/capella" + } + } +} + +provider "capella" { + host = var.host + authentication_token = var.auth_token +} + diff --git a/examples/apikey/terraform.template.tfvars b/examples/apikey/terraform.template.tfvars new file mode 100644 index 00000000..a4f5dd19 --- /dev/null +++ b/examples/apikey/terraform.template.tfvars @@ -0,0 +1,17 @@ +auth_token = "v4-api-key-secret" +organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" +host = "https://cloudapi.cloud.couchbase.com" + +apikey = { + name = "New Terraform Api Key" + description = "A Capella Api Key" + allowed_cidrs = ["10.1.42.0/23", "10.1.42.0/23"] + organization_roles = ["organizationMember"] + expiry = 179 +} + +resource = { + id = "resource id" + roles = ["projectManager", "projectDataReader"] + type = "project" +} diff --git a/examples/apikey/variables.tf b/examples/apikey/variables.tf new file mode 100644 index 00000000..789fc47d --- /dev/null +++ b/examples/apikey/variables.tf @@ -0,0 +1,40 @@ +variable "host" { + default = "https://cloudapi.dev.nonprod-project-avengers.com" + description = "The Host URL of Couchbase Cloud." 
+} + +variable "organization_id" { + description = "Capella Organization ID" +} + +variable "auth_token" { + description = "Authentication API Key" + sensitive = true +} + +variable "project_name" { + default = "terraform-couchbasecapella-project" + description = "Project Name for Project Created via Terraform" +} + +variable "apikey" { + description = "ApiKey creation details useful for apikey creation" + + type = object({ + name = string + description = string + allowed_cidrs = list(string) + organization_roles = list(string) + expiry = number + }) +} + +variable "resource" { + description = "Resource details useful for apikey creation" + + type = object({ + id = string + roles = list(string) + type = string + }) +} \ No newline at end of file diff --git a/examples/bucket/README.md b/examples/bucket/README.md new file mode 100644 index 00000000..2234d584 --- /dev/null +++ b/examples/bucket/README.md @@ -0,0 +1,267 @@ +# Capella Bucket Example + +This example shows how to create and manage Buckets in Capella. + +This creates a new bucket in the selected Capella cluster. It uses the organization ID, project ID and cluster ID to do so. + +To run, configure your Couchbase Capella provider as described in the README in the root of this project. + +# Example Walkthrough + +In this example, we are going to do the following. + +1. Create a new bucket in an existing Capella cluster as stated in the `create_bucket.tf` file. + +### View the plan for the resources that Terraform will create + +Command: `terraform plan` + +Sample Output: +``` +terraform plan +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/nidhi.kumar/go/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published +│ releases. 
+╵ + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # capella_bucket.new_bucket will be created + + resource "capella_bucket" "new_bucket" { + + cluster_id = "96f2e933-cf5e-407a-b9c7-926f706f89ef" + + conflict_resolution = "seqno" + + durability_level = "majorityAndPersistActive" + + eviction_policy = "fullEviction" + + flush = true + + id = (known after apply) + + memory_allocationinmb = 105 + + name = "test_bucket" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + + project_id = "f14134f2-7943-4e7b-b2c5-fc2071728b6e" + + replicas = 2 + + stats = (known after apply) + + storage_backend = "couchstore" + + ttl = 100 + + type = "couchbase" + } + +Plan: 1 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + new_bucket = { + + cluster_id = "96f2e933-cf5e-407a-b9c7-926f706f89ef" + + conflict_resolution = "seqno" + + durability_level = "majorityAndPersistActive" + + eviction_policy = "fullEviction" + + flush = true + + id = (known after apply) + + memory_allocationinmb = 105 + + name = "test_bucket" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + + project_id = "f14134f2-7943-4e7b-b2c5-fc2071728b6e" + + replicas = 2 + + stats = (known after apply) + + storage_backend = "couchstore" + + ttl = 100 + + type = "couchbase" + } + +───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── + +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now. 
+``` + +### Apply the Plan, in order to create a new Bucket + +Command: `terraform apply` + +Sample Output: +``` +terraform apply +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/nidhi.kumar/go/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published +│ releases. +╵ + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # capella_bucket.new_bucket will be created + + resource "capella_bucket" "new_bucket" { + + cluster_id = "96f2e933-cf5e-407a-b9c7-926f706f89ef" + + conflict_resolution = "seqno" + + durability_level = "majorityAndPersistActive" + + eviction_policy = "fullEviction" + + flush = true + + id = (known after apply) + + memory_allocationinmb = 105 + + name = "test_bucket" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + + project_id = "f14134f2-7943-4e7b-b2c5-fc2071728b6e" + + replicas = 2 + + stats = (known after apply) + + storage_backend = "couchstore" + + ttl = 100 + + type = "couchbase" + } + +Plan: 1 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + new_bucket = { + + cluster_id = "96f2e933-cf5e-407a-b9c7-926f706f89ef" + + conflict_resolution = "seqno" + + durability_level = "majorityAndPersistActive" + + eviction_policy = "fullEviction" + + flush = true + + id = (known after apply) + + memory_allocationinmb = 105 + + name = "test_bucket" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + + project_id = "f14134f2-7943-4e7b-b2c5-fc2071728b6e" + + replicas = 2 + + stats = (known after apply) + + storage_backend = "couchstore" + + ttl = 100 + + type = "couchbase" + } + +Do you want to perform these actions? 
+ Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +capella_bucket.new_bucket: Creating... +capella_bucket.new_bucket: Still creating... [10s elapsed] +capella_bucket.new_bucket: Creation complete after 13s [id=dGVzdF9idWNrZXQ=] + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +Outputs: + +new_bucket = { + "cluster_id" = "96f2e933-cf5e-407a-b9c7-926f706f89ef" + "conflict_resolution" = "seqno" + "durability_level" = "majorityAndPersistActive" + "eviction_policy" = "fullEviction" + "flush" = true + "id" = "dGVzdF9idWNrZXQ=" + "memory_allocationinmb" = 105 + "name" = "test_bucket" + "organization_id" = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + "project_id" = "f14134f2-7943-4e7b-b2c5-fc2071728b6e" + "replicas" = 2 + "stats" = { + "disk_used_in_mib" = 0 + "item_count" = 0 + "memory_used_in_mib" = 0 + "ops_per_second" = 0 + } + "storage_backend" = "couchstore" + "ttl" = 100 + "type" = "couchbase" +} +``` + +### Finally, destroy the resources created by Terraform + +Command: `terraform destroy` + +Sample Output: + +``` +terraform destroy +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/nidhi.kumar/go/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published +│ releases. +╵ +capella_bucket.new_bucket: Refreshing state... [id=dGVzdF9idWNrZXQ=] + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + - destroy + +Terraform will perform the following actions: + + # capella_bucket.new_bucket will be destroyed + - resource "capella_bucket" "new_bucket" { + - cluster_id = "96f2e933-cf5e-407a-b9c7-926f706f89ef" -> null + - conflict_resolution = "seqno" -> null + - durability_level = "majorityAndPersistActive" -> null + - eviction_policy = "fullEviction" -> null + - flush = true -> null + - id = "dGVzdF9idWNrZXQ=" -> null + - memory_allocationinmb = 105 -> null + - name = "test_bucket" -> null + - organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" -> null + - project_id = "f14134f2-7943-4e7b-b2c5-fc2071728b6e" -> null + - replicas = 2 -> null + - stats = { + - disk_used_in_mib = 0 -> null + - item_count = 0 -> null + - memory_used_in_mib = 0 -> null + - ops_per_second = 0 -> null + } -> null + - storage_backend = "couchstore" -> null + - ttl = 100 -> null + - type = "couchbase" -> null + } + +Plan: 0 to add, 0 to change, 1 to destroy. + +Changes to Outputs: + - new_bucket = { + - cluster_id = "96f2e933-cf5e-407a-b9c7-926f706f89ef" + - conflict_resolution = "seqno" + - durability_level = "majorityAndPersistActive" + - eviction_policy = "fullEviction" + - flush = true + - id = "dGVzdF9idWNrZXQ=" + - memory_allocationinmb = 105 + - name = "test_bucket" + - organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + - project_id = "f14134f2-7943-4e7b-b2c5-fc2071728b6e" + - replicas = 2 + - stats = { + - disk_used_in_mib = 0 + - item_count = 0 + - memory_used_in_mib = 0 + - ops_per_second = 0 + } + - storage_backend = "couchstore" + - ttl = 100 + - type = "couchbase" + } -> null + +Do you really want to destroy all resources? + Terraform will destroy all your managed infrastructure, as shown above. + There is no undo. Only 'yes' will be accepted to confirm. + + Enter a value: yes + +capella_bucket.new_bucket: Destroying... 
[id=dGVzdF9idWNrZXQ=] +capella_bucket.new_bucket: Destruction complete after 1s + +Destroy complete! Resources: 1 destroyed. +``` + diff --git a/examples/bucket/create_bucket.tf b/examples/bucket/create_bucket.tf new file mode 100644 index 00000000..cade35e7 --- /dev/null +++ b/examples/bucket/create_bucket.tf @@ -0,0 +1,19 @@ +output "new_bucket" { + value = capella_bucket.new_bucket +} + +resource "capella_bucket" "new_bucket" { + name = var.bucket.name + organization_id = var.organization_id + project_id = var.project_id + cluster_id = var.cluster_id + type = var.bucket.type + storage_backend = var.bucket.storage_backend + memory_allocationinmb = var.bucket.memory_allocationinmb + conflict_resolution = var.bucket.conflict_resolution + durability_level = var.bucket.durability_level + replicas = var.bucket.replicas + flush = var.bucket.flush + ttl = var.bucket.ttl + eviction_policy = var.bucket.eviction_policy +} \ No newline at end of file diff --git a/examples/bucket/main.tf b/examples/bucket/main.tf new file mode 100644 index 00000000..9b86e51a --- /dev/null +++ b/examples/bucket/main.tf @@ -0,0 +1,13 @@ +terraform { + required_providers { + capella = { + source = "hashicorp.com/couchabasecloud/capella" + } + } +} + +provider "capella" { + host = var.host + authentication_token = var.auth_token +} + diff --git a/examples/bucket/terraform.template.tfvars b/examples/bucket/terraform.template.tfvars new file mode 100644 index 00000000..39e3262c --- /dev/null +++ b/examples/bucket/terraform.template.tfvars @@ -0,0 +1,20 @@ +auth_token = "v4-api-key-secret" +organization_id = "" +project_id = "" +cluster_id = "" +host = "https://cloudapi.cloud.couchbase.com" + +bucket = { + name = "test_bucket" + type = "couchbase" + storage_backend = "couchstore" + memory_allocationinmb = 105 + conflict_resolution = "seqno" + durability_level = "majorityAndPersistActive" + replicas = 2 + flush = true + ttl = 100 +} + + + diff --git a/examples/bucket/variables.tf 
b/examples/bucket/variables.tf new file mode 100644 index 00000000..690931bb --- /dev/null +++ b/examples/bucket/variables.tf @@ -0,0 +1,37 @@ +variable "host" { + description = "The Host URL of Couchbase Cloud." +} + +variable "organization_id" { + description = "Capella Organization ID" +} + +variable "auth_token" { + description = "Authentication API Key" + sensitive = true +} + +variable "project_id" { + description = "Capella Project ID" +} + +variable "cluster_id" { + description = "Capella Cluster ID" +} + +variable "bucket" { + description = "Bucket configuration details useful for creation" + + type = object({ + name = string + type = optional(string) + storage_backend = optional(string) + memory_allocationinmb = optional(number) + conflict_resolution = optional(string) + durability_level = optional(string) + replicas = optional(number) + flush = optional(bool) + ttl = optional(number) + eviction_policy = optional(string) + }) +} \ No newline at end of file diff --git a/examples/certificate/README.md b/examples/certificate/README.md new file mode 100644 index 00000000..2d889d56 --- /dev/null +++ b/examples/certificate/README.md @@ -0,0 +1,169 @@ +# Capella Certificate Example + +This example shows how to manage Certificates in Capella. + +This gets an existing Certificate in the cluster. It uses the organization ID, project ID and cluster ID to do so. + +To run, configure your Couchbase Capella provider as described in README in the root of this project. + +# Example Walkthrough + +In this example, we are going to do the following. + +1. Get an existing certificate in Capella as stated in the `get_certificate.tf` file. + +If you check the `terraform.template.tfvars` file - you can see that we need 5 main variables to run the terraform commands. +Make sure you copy the file to `terraform.tfvars` and update the values of the variables as per the correct organization access. 
+ + +### View the plan for the resources that Terraform will create + +Command: `terraform plan` + +Sample Output: +``` +terraform plan +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/nidhi.kumar/go/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published +│ releases. +╵ +data.capella_certificates.existing_certificates: Reading... +data.capella_certificates.existing_certificates: Read complete after 1s + +Changes to Outputs: + + certificates_get = { + + certificate = <<-EOT + -----BEGIN CERTIFICATE----- + MIIDFTCCAf2gAwIBAgIRANguFcFZ7eVLTF2mnPqkkhYwDQYJKoZIhvcNAQELBQAw + JDESMBAGA1UECgwJQ291Y2hiYXNlMQ4wDAYDVQQLDAVDbG91ZDAeFw0xOTEwMTgx + NDUzMzRaFw0yOTEwMTgxNTUzMzRaMCQxEjAQBgNVBAoMCUNvdWNoYmFzZTEOMAwG + A1UECwwFQ2xvdWQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDMoL2G + 1yR4XKOL5KrAZbgJI11NkcooxqCSqoibr5nSM+GNARlou42XbopRhkLQlSMlmH7U + ZreI7xq2MqmCaQvP1jdS5al/GwuwAP+2kU2nz4IHzliCVV6YvYqNy0fygNpYky9/ + wjCu32n8Ae0AZuxcsAzPUtJBvIIGHum08WlLYS3gNrYkfyds6LfvZvqMk703RL5X + Ny/RXWmbbBXAXh0chsavEK7EsDLI4t4WI2Iv8+lwS7Wo7Vh6NnEmJLPAAp7udNK4 + U3nwjkL5p/yINROT7CxUE9x0IB2l2rZwZiJhgHCpee77J8QesDut+jZu38ZYY3le + PS38S81T6I6bSSgtAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE + FLlocLdzgAeibrlCmEO4OH5Buf3vMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0B + AQsFAAOCAQEAkoVX5CJ7rGx2ALfzy5C7Z+tmEmrZ6jdHjDtw4XwWNhlrsgMuuboU + Y9XMinSSm1TVfvIz4ru82MVMRxq4v1tPwPdZabbzKYclHkwSMxK5BkyEKWzF1Hoq + UcinTaT68lVzkTc0D8T+gkRzwXIqxjML2ZdruD1foHNzCgeGHzKzdsjYqrnHv17b + J+f5tqoa5CKbnyWl3HP0k7r3HHQP0GQequoqXcL3XlERX3Ne20Chck9mftNnHhKw + Dby7ylZaP97sphqOZQ/W/gza7x1JYylrLXvjfdv3Nmu7oSMKO/2cDyWwcbVGkpbk + 8JOQtFENWmr9u2S0cQfwoCSYBWaK0ofivA== + -----END CERTIFICATE----- + EOT + + cluster_id = "6072278e-2354-4ea0-9e1b-ff18aafd41df" + + organization_id = 
"6af08c0a-8cab-4c1c-b257-b521575c16d0" + + project_id = "f14134f2-7943-4e7b-b2c5-fc2071728b6e" + } + +You can apply this plan to save these new output values to the Terraform state, without changing any real infrastructure. + +───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── + +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now. +``` + +### Apply the Plan, in order to get the certificate + +Command: `terraform apply` + +Sample Output: +``` +terraform apply +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/nidhi.kumar/go/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published +│ releases. +╵ +data.capella_certificates.existing_certificates: Reading... 
+data.capella_certificates.existing_certificates: Read complete after 0s + +Changes to Outputs: + + certificates_get = { + + certificate = <<-EOT + -----BEGIN CERTIFICATE----- + MIIDFTCCAf2gAwIBAgIRANguFcFZ7eVLTF2mnPqkkhYwDQYJKoZIhvcNAQELBQAw + JDESMBAGA1UECgwJQ291Y2hiYXNlMQ4wDAYDVQQLDAVDbG91ZDAeFw0xOTEwMTgx + NDUzMzRaFw0yOTEwMTgxNTUzMzRaMCQxEjAQBgNVBAoMCUNvdWNoYmFzZTEOMAwG + A1UECwwFQ2xvdWQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDMoL2G + 1yR4XKOL5KrAZbgJI11NkcooxqCSqoibr5nSM+GNARlou42XbopRhkLQlSMlmH7U + ZreI7xq2MqmCaQvP1jdS5al/GwuwAP+2kU2nz4IHzliCVV6YvYqNy0fygNpYky9/ + wjCu32n8Ae0AZuxcsAzPUtJBvIIGHum08WlLYS3gNrYkfyds6LfvZvqMk703RL5X + Ny/RXWmbbBXAXh0chsavEK7EsDLI4t4WI2Iv8+lwS7Wo7Vh6NnEmJLPAAp7udNK4 + U3nwjkL5p/yINROT7CxUE9x0IB2l2rZwZiJhgHCpee77J8QesDut+jZu38ZYY3le + PS38S81T6I6bSSgtAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE + FLlocLdzgAeibrlCmEO4OH5Buf3vMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0B + AQsFAAOCAQEAkoVX5CJ7rGx2ALfzy5C7Z+tmEmrZ6jdHjDtw4XwWNhlrsgMuuboU + Y9XMinSSm1TVfvIz4ru82MVMRxq4v1tPwPdZabbzKYclHkwSMxK5BkyEKWzF1Hoq + UcinTaT68lVzkTc0D8T+gkRzwXIqxjML2ZdruD1foHNzCgeGHzKzdsjYqrnHv17b + J+f5tqoa5CKbnyWl3HP0k7r3HHQP0GQequoqXcL3XlERX3Ne20Chck9mftNnHhKw + Dby7ylZaP97sphqOZQ/W/gza7x1JYylrLXvjfdv3Nmu7oSMKO/2cDyWwcbVGkpbk + 8JOQtFENWmr9u2S0cQfwoCSYBWaK0ofivA== + -----END CERTIFICATE----- + EOT + + cluster_id = "6072278e-2354-4ea0-9e1b-ff18aafd41df" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + + project_id = "f14134f2-7943-4e7b-b2c5-fc2071728b6e" + } + +You can apply this plan to save these new output values to the Terraform state, without changing any real infrastructure. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + + +Apply complete! Resources: 0 added, 0 changed, 0 destroyed. 
+ +Outputs: + +certificates_get = { + "certificate" = <<-EOT + -----BEGIN CERTIFICATE----- + MIIDFTCCAf2gAwIBAgIRANguFcFZ7eVLTF2mnPqkkhYwDQYJKoZIhvcNAQELBQAw + JDESMBAGA1UECgwJQ291Y2hiYXNlMQ4wDAYDVQQLDAVDbG91ZDAeFw0xOTEwMTgx + NDUzMzRaFw0yOTEwMTgxNTUzMzRaMCQxEjAQBgNVBAoMCUNvdWNoYmFzZTEOMAwG + A1UECwwFQ2xvdWQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDMoL2G + 1yR4XKOL5KrAZbgJI11NkcooxqCSqoibr5nSM+GNARlou42XbopRhkLQlSMlmH7U + ZreI7xq2MqmCaQvP1jdS5al/GwuwAP+2kU2nz4IHzliCVV6YvYqNy0fygNpYky9/ + wjCu32n8Ae0AZuxcsAzPUtJBvIIGHum08WlLYS3gNrYkfyds6LfvZvqMk703RL5X + Ny/RXWmbbBXAXh0chsavEK7EsDLI4t4WI2Iv8+lwS7Wo7Vh6NnEmJLPAAp7udNK4 + U3nwjkL5p/yINROT7CxUE9x0IB2l2rZwZiJhgHCpee77J8QesDut+jZu38ZYY3le + PS38S81T6I6bSSgtAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE + FLlocLdzgAeibrlCmEO4OH5Buf3vMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0B + AQsFAAOCAQEAkoVX5CJ7rGx2ALfzy5C7Z+tmEmrZ6jdHjDtw4XwWNhlrsgMuuboU + Y9XMinSSm1TVfvIz4ru82MVMRxq4v1tPwPdZabbzKYclHkwSMxK5BkyEKWzF1Hoq + UcinTaT68lVzkTc0D8T+gkRzwXIqxjML2ZdruD1foHNzCgeGHzKzdsjYqrnHv17b + J+f5tqoa5CKbnyWl3HP0k7r3HHQP0GQequoqXcL3XlERX3Ne20Chck9mftNnHhKw + Dby7ylZaP97sphqOZQ/W/gza7x1JYylrLXvjfdv3Nmu7oSMKO/2cDyWwcbVGkpbk + 8JOQtFENWmr9u2S0cQfwoCSYBWaK0ofivA== + -----END CERTIFICATE----- + EOT + "cluster_id" = "6072278e-2354-4ea0-9e1b-ff18aafd41df" + "organization_id" = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + "project_id" = "f14134f2-7943-4e7b-b2c5-fc2071728b6e" +} +``` + +### List the resources that are present in the Terraform State file. 
+ +Command: `terraform state list` + +Sample Output: +``` +$ terraform state list +data.capella_certificates.existing_certificates +``` diff --git a/examples/certificate/get_certificate.tf b/examples/certificate/get_certificate.tf new file mode 100644 index 00000000..53fc7147 --- /dev/null +++ b/examples/certificate/get_certificate.tf @@ -0,0 +1,9 @@ +output "certificates_get" { + value = data.capella_certificates.existing_certificates +} + +data "capella_certificates" "existing_certificates" { + organization_id = var.organization_id + project_id = var.project_id + cluster_id = var.cluster_id +} diff --git a/examples/certificate/main.tf b/examples/certificate/main.tf new file mode 100644 index 00000000..0ad7da72 --- /dev/null +++ b/examples/certificate/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + capella = { + source = "hashicorp.com/couchabasecloud/capella" + } + } +} + +provider "capella" { + host = var.host + authentication_token = var.auth_token +} diff --git a/examples/certificate/terraform.template.tfvars b/examples/certificate/terraform.template.tfvars new file mode 100644 index 00000000..a51cd762 --- /dev/null +++ b/examples/certificate/terraform.template.tfvars @@ -0,0 +1,5 @@ +auth_token = "v4-api-key-secret" +organization_id = "" +project_id = "" +cluster_id = "" +host = "https://cloudapi.cloud.couchbase.com" \ No newline at end of file diff --git a/examples/certificate/variables.tf b/examples/certificate/variables.tf new file mode 100644 index 00000000..c7b76e60 --- /dev/null +++ b/examples/certificate/variables.tf @@ -0,0 +1,20 @@ +variable "host" { + description = "The Host URL of Couchbase Cloud." 
+} + +variable "organization_id" { + description = "Capella Organization ID" +} + +variable "project_id" { + description = "Capella Project ID" +} + +variable "cluster_id" { + description = "Capella Cluster ID" +} + +variable "auth_token" { + description = "Authentication API Key" + sensitive = true +} diff --git a/examples/cluster/README.md b/examples/cluster/README.md new file mode 100644 index 00000000..ee7eab6c --- /dev/null +++ b/examples/cluster/README.md @@ -0,0 +1,429 @@ +# Capella Cluster Example + +This example shows how to create and manage Clusters in Capella. + +This creates a new cluster in the selected Capella project. It uses the organization ID and project ID to do so. + +To run, configure your Couchbase Capella provider as described in the README in the root of this project. + +# Example Walkthrough + +In this example, we are going to do the following. +1. Create a new cluster with the specified configuration. + + +### View the plan for the resources that Terraform will create + +Command: `terraform plan` + +Sample Output: +``` +$ terraform plan +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/talina.shrotriya/workspace/terraform-provider-capella +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # capella_cluster.new_cluster will be created + + resource "capella_cluster" "new_cluster" { + + app_service_id = (known after apply) + + audit = { + + created_at = (known after apply) + + created_by = (known after apply) + + modified_at = (known after apply) + + modified_by = (known after apply) + + version = (known after apply) + } + + availability = { + + type = "multi" + } + + cloud_provider = { + + cidr = "192.168.0.0/20" + + region = "us-east-1" + + type = "aws" + } + + couchbase_server = { + + version = "7.1" + } + + current_state = (known after apply) + + description = "My first test cluster for multiple services." + + etag = (known after apply) + + id = (known after apply) + + name = "New Terraform Cluster" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + + project_id = (known after apply) + + service_groups = [ + + { + + node = { + + compute = { + + cpu = 4 + + ram = 16 + } + + disk = { + + iops = 5000 + + storage = 50 + + type = "io2" + } + } + + num_of_nodes = 3 + + services = [ + + "data", + + "index", + + "query", + ] + }, + ] + + support = { + + plan = "developer pro" + + timezone = "PT" + } + } + + # capella_project.existing_project will be created + + resource "capella_project" "existing_project" { + + audit = { + + created_at = (known after apply) + + created_by = (known after apply) + + modified_at = (known after apply) + + modified_by = (known after apply) + + version = (known after apply) + } + + description = "A Capella Project that will host many Capella clusters." + + etag = (known after apply) + + id = (known after apply) + + name = "terraform-couchbasecapella-project" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + } + +Plan: 2 to add, 0 to change, 0 to destroy. 
+ +Changes to Outputs: + + existing_project = { + + audit = (known after apply) + + description = "A Capella Project that will host many Capella clusters." + + etag = (known after apply) + + id = (known after apply) + + if_match = null + + name = "terraform-couchbasecapella-project" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + } + + new_cluster = { + + app_service_id = (known after apply) + + audit = (known after apply) + + availability = { + + type = "multi" + } + + cloud_provider = { + + cidr = "192.168.0.0/20" + + region = "us-east-1" + + type = "aws" + } + + couchbase_server = { + + version = "7.1" + } + + current_state = (known after apply) + + description = "My first test cluster for multiple services." + + etag = (known after apply) + + id = (known after apply) + + if_match = null + + name = "New Terraform Cluster" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + + project_id = (known after apply) + + service_groups = [ + + { + + node = { + + compute = { + + cpu = 4 + + ram = 16 + } + + disk = { + + iops = 5000 + + storage = 50 + + type = "io2" + } + } + + num_of_nodes = 3 + + services = [ + + "data", + + "index", + + "query", + ] + }, + ] + + support = { + + plan = "developer pro" + + timezone = "PT" + } + } + +─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── + +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now. 
+``` + +### Apply the Plan, in order to create a new Cluster in Capella + +Command: `terraform apply` + +Sample Output: +``` +$ terraform apply +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/talina.shrotriya/workspace/terraform-provider-capella +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # capella_cluster.new_cluster will be created + + resource "capella_cluster" "new_cluster" { + + app_service_id = (known after apply) + + audit = { + + created_at = (known after apply) + + created_by = (known after apply) + + modified_at = (known after apply) + + modified_by = (known after apply) + + version = (known after apply) + } + + availability = { + + type = "multi" + } + + cloud_provider = { + + cidr = "192.168.0.0/20" + + region = "us-east-1" + + type = "aws" + } + + couchbase_server = { + + version = "7.1" + } + + current_state = (known after apply) + + description = "My first test cluster for multiple services." 
+ + etag = (known after apply) + + id = (known after apply) + + name = "New Terraform Cluster" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + + project_id = (known after apply) + + service_groups = [ + + { + + node = { + + compute = { + + cpu = 4 + + ram = 16 + } + + disk = { + + iops = 5000 + + storage = 50 + + type = "io2" + } + } + + num_of_nodes = 3 + + services = [ + + "data", + + "index", + + "query", + ] + }, + ] + + support = { + + plan = "developer pro" + + timezone = "PT" + } + } + + # capella_project.existing_project will be created + + resource "capella_project" "existing_project" { + + audit = { + + created_at = (known after apply) + + created_by = (known after apply) + + modified_at = (known after apply) + + modified_by = (known after apply) + + version = (known after apply) + } + + description = "A Capella Project that will host many Capella clusters." + + etag = (known after apply) + + id = (known after apply) + + name = "terraform-couchbasecapella-project" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + } + +Plan: 2 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + existing_project = { + + audit = (known after apply) + + description = "A Capella Project that will host many Capella clusters." + + etag = (known after apply) + + id = (known after apply) + + if_match = null + + name = "terraform-couchbasecapella-project" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + } + + new_cluster = { + + app_service_id = (known after apply) + + audit = (known after apply) + + availability = { + + type = "multi" + } + + cloud_provider = { + + cidr = "192.168.0.0/20" + + region = "us-east-1" + + type = "aws" + } + + couchbase_server = { + + version = "7.1" + } + + current_state = (known after apply) + + description = "My first test cluster for multiple services." 
+ + etag = (known after apply) + + id = (known after apply) + + if_match = null + + name = "New Terraform Cluster" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + + project_id = (known after apply) + + service_groups = [ + + { + + node = { + + compute = { + + cpu = 4 + + ram = 16 + } + + disk = { + + iops = 5000 + + storage = 50 + + type = "io2" + } + } + + num_of_nodes = 3 + + services = [ + + "data", + + "index", + + "query", + ] + }, + ] + + support = { + + plan = "developer pro" + + timezone = "PT" + } + } + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +capella_project.existing_project: Creating... +capella_project.existing_project: Creation complete after 1s [id=3dac8dfb-69dd-4145-a852-11da0766d8a9] +capella_cluster.new_cluster: Creating... +capella_cluster.new_cluster: Still creating... [10s elapsed] +capella_cluster.new_cluster: Still creating... [20s elapsed] +capella_cluster.new_cluster: Still creating... [30s elapsed] +capella_cluster.new_cluster: Still creating... [40s elapsed] +capella_cluster.new_cluster: Still creating... [50s elapsed] +capella_cluster.new_cluster: Still creating... [1m0s elapsed] +capella_cluster.new_cluster: Still creating... [1m10s elapsed] +capella_cluster.new_cluster: Still creating... [1m20s elapsed] +capella_cluster.new_cluster: Still creating... [1m30s elapsed] +capella_cluster.new_cluster: Still creating... [1m40s elapsed] +capella_cluster.new_cluster: Still creating... [1m50s elapsed] +capella_cluster.new_cluster: Still creating... [2m0s elapsed] +capella_cluster.new_cluster: Still creating... [2m10s elapsed] +capella_cluster.new_cluster: Still creating... [2m20s elapsed] +capella_cluster.new_cluster: Still creating... [2m30s elapsed] +capella_cluster.new_cluster: Still creating... [2m40s elapsed] +capella_cluster.new_cluster: Still creating... 
[2m50s elapsed] +capella_cluster.new_cluster: Still creating... [3m0s elapsed] +capella_cluster.new_cluster: Creation complete after 3m3s [id=da519c25-79b2-477e-aa77-61e2ee7a677a] + +Apply complete! Resources: 2 added, 0 changed, 0 destroyed. + +Outputs: + +existing_project = { + "audit" = { + "created_at" = "2023-09-19 22:50:45.476879515 +0000 UTC" + "created_by" = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + "modified_at" = "2023-09-19 22:50:45.476901411 +0000 UTC" + "modified_by" = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + "version" = 1 + } + "description" = "A Capella Project that will host many Capella clusters." + "etag" = "Version: 1" + "id" = "3dac8dfb-69dd-4145-a852-11da0766d8a9" + "if_match" = tostring(null) + "name" = "terraform-couchbasecapella-project" + "organization_id" = "bdb8662c-7157-46ea-956f-ed86f4c75211" +} +new_cluster = { + "app_service_id" = tostring(null) + "audit" = { + "created_at" = "2023-09-19 22:50:45.937512888 +0000 UTC" + "created_by" = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + "modified_at" = "2023-09-19 22:53:48.367132006 +0000 UTC" + "modified_by" = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + "version" = 5 + } + "availability" = { + "type" = "multi" + } + "cloud_provider" = { + "cidr" = "192.168.0.0/20" + "region" = "us-east-1" + "type" = "aws" + } + "couchbase_server" = { + "version" = "7.1" + } + "current_state" = "healthy" + "description" = "My first test cluster for multiple services." 
+ "etag" = "Version: 5" + "id" = "da519c25-79b2-477e-aa77-61e2ee7a677a" + "if_match" = tostring(null) + "name" = "New Terraform Cluster" + "organization_id" = "bdb8662c-7157-46ea-956f-ed86f4c75211" + "project_id" = "3dac8dfb-69dd-4145-a852-11da0766d8a9" + "service_groups" = tolist([ + { + "node" = { + "compute" = { + "cpu" = 4 + "ram" = 16 + } + "disk" = { + "iops" = 5000 + "storage" = 50 + "type" = "io2" + } + } + "num_of_nodes" = 3 + "services" = tolist([ + "data", + "index", + "query", + ]) + }, + ]) + "support" = { + "plan" = "developer pro" + "timezone" = "PT" + } +} +``` + diff --git a/examples/cluster/create_cluster.tf b/examples/cluster/create_cluster.tf new file mode 100644 index 00000000..9182c594 --- /dev/null +++ b/examples/cluster/create_cluster.tf @@ -0,0 +1,52 @@ +output "existing_project" { + value = capella_project.existing_project +} + +output "new_cluster" { + value = capella_cluster.new_cluster +} +resource "capella_project" "existing_project" { + organization_id = var.organization_id + name = var.project_name + description = "A Capella Project that will host many Capella clusters." +} + +resource "capella_cluster" "new_cluster" { + organization_id = var.organization_id + project_id = capella_project.existing_project.id + name = var.cluster.name + description = "My first test cluster for multiple services." 
+ cloud_provider = { + type = var.cloud_provider.name + region = var.cloud_provider.region + cidr = var.cluster.cidr + } + couchbase_server = { + version = var.cluster.server_version + } + service_groups = [ + { + node = { + compute = { + cpu = var.compute.cpu + ram = var.compute.ram + } + disk = { + storage = var.disk.size + type = var.disk.type + iops = var.disk.iops + } + } + num_of_nodes = var.cluster.node_count + services = var.cluster.couchbase_services + } + ] + availability = { + "type" : var.cluster.availability_zone + } + support = { + plan = var.support.plan + timezone = var.support.timezone + } +} + diff --git a/examples/cluster/main.tf b/examples/cluster/main.tf new file mode 100644 index 00000000..9b86e51a --- /dev/null +++ b/examples/cluster/main.tf @@ -0,0 +1,13 @@ +terraform { + required_providers { + capella = { + source = "hashicorp.com/couchabasecloud/capella" + } + } +} + +provider "capella" { + host = var.host + authentication_token = var.auth_token +} + diff --git a/examples/cluster/terraform.template.tfvars b/examples/cluster/terraform.template.tfvars new file mode 100644 index 00000000..b878dd15 --- /dev/null +++ b/examples/cluster/terraform.template.tfvars @@ -0,0 +1,33 @@ +auth_token = "v4-api-key-secret" +organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" +host = "https://cloudapi.cloud.couchbase.com" + +cloud_provider = { + name = "aws", + region = "us-east-1" +} + +cluster = { + name = "New Terraform Cluster" + cidr = "192.168.0.0/20" + node_count = 3 + server_version = "7.1" + couchbase_services = ["data", "index", "query"] + availability_zone = "multi" +} + +compute = { + cpu = 4 + ram = 16 +} + +disk = { + size = 50 + type = "io2" + iops = 5000 +} + +support = { + plan = "developer pro" + timezone = "PT" +} diff --git a/examples/cluster/variables.tf b/examples/cluster/variables.tf new file mode 100644 index 00000000..cb70a7f3 --- /dev/null +++ b/examples/cluster/variables.tf @@ -0,0 +1,67 @@ +variable "host" { + description = 
"The Host URL of Couchbase Cloud." +} + +variable "organization_id" { + description = "Capella Organization ID" +} + +variable "auth_token" { + description = "Authentication API Key" + sensitive = true +} + +variable "project_name" { + default = "terraform-couchbasecapella-project" + description = "Project Name for Project Created via Terraform" +} + +variable "cloud_provider" { + description = "Cloud Provider details useful for cluster creation" + + type = object({ + name = string + region = string + }) +} + +variable "cluster" { + description = "Cluster configuration details useful for creation" + + type = object({ + name = string + cidr = string + node_count = number + server_version = string + couchbase_services = list(string) + availability_zone = string + }) +} + +variable "compute" { + description = "All cluster node compute configuration" + + type = object({ + cpu = number + ram = number + }) +} + +variable "disk" { + description = "All nodes' disk configuration" + + type = object({ + size = number + type = string + iops = number + }) +} + +variable "support" { + description = "Support configuration applicable to the cluster during creation" + + type = object({ + plan = string + timezone = string + }) +} \ No newline at end of file diff --git a/examples/database_credential/README.md b/examples/database_credential/README.md new file mode 100644 index 00000000..cbe7798a --- /dev/null +++ b/examples/database_credential/README.md @@ -0,0 +1,325 @@ +# Capella Database Credentials Example + +This example shows how to create and manage Database Credentials in Capella. + +This creates a new database credential in the selected Capella organization. It uses the organization ID, project ID and cluster ID. + +To run, configure your Couchbase Capella provider as described in README in the root of this project. + +# Example Walkthrough + +In this example, we are going to do the following. + +1. 
Create a new database credential in Capella as stated in the `create_database_credential.tf` file. +2. View the sensitive field i.e. database credential password after creation. +3. Update the database credential password. + +If you check the `terraform.template.tfvars` file - you can see that we need 7 main variables to run the terraform commands. +Make sure you copy the file to `terraform.tfvars` and update the values of the variables as per the correct organization access. + + +### View the plan for the resources that Terraform will create + +Command: `terraform plan` + +Sample Output: +``` +$ terraform plan +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/talina.shrotriya/workspace/terraform-provider-capella +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # capella_database_credential.new_database_credential will be created + + resource "capella_database_credential" "new_database_credential" { + + access = [ + + { + + privileges = [ + + "data_reader", + + "data_writer", + ] + }, + ] + + audit = { + + created_at = (known after apply) + + created_by = (known after apply) + + modified_at = (known after apply) + + modified_by = (known after apply) + + version = (known after apply) + } + + cluster_id = "c082af14-c244-40da-b54a-669392738569" + + id = (known after apply) + + name = "test_db_user" + + organization_id = "0783f698-ac58-4018-84a3-31c3b6ef785d" + + password = (sensitive value) + + project_id = "a1d1a971-092e-40d9-a68b-ef705573f3d8" + } + +Plan: 1 to add, 0 to change, 0 to destroy. 
+ +Changes to Outputs: + + new_database_credential = (sensitive value) + +────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── + +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now. +``` + +### Apply the Plan, in order to create a new Database Credential + +Command: `terraform apply` + +Sample Output: +``` +$ terraform apply +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/talina.shrotriya/workspace/terraform-provider-capella +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # capella_database_credential.new_database_credential will be created + + resource "capella_database_credential" "new_database_credential" { + + access = [ + + { + + privileges = [ + + "data_reader", + + "data_writer", + ] + }, + ] + + audit = { + + created_at = (known after apply) + + created_by = (known after apply) + + modified_at = (known after apply) + + modified_by = (known after apply) + + version = (known after apply) + } + + cluster_id = "c082af14-c244-40da-b54a-669392738569" + + id = (known after apply) + + name = "test_db_user" + + organization_id = "0783f698-ac58-4018-84a3-31c3b6ef785d" + + password = (sensitive value) + + project_id = "a1d1a971-092e-40d9-a68b-ef705573f3d8" + } + +Plan: 1 to add, 0 to change, 0 to destroy. 
+ +Changes to Outputs: + + new_database_credential = (sensitive value) + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +capella_database_credential.new_database_credential: Creating... +capella_database_credential.new_database_credential: Creation complete after 2s [id=7ef4675e-513f-4358-a583-ae5c23e6fa67] + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +Outputs: + +new_database_credential = +``` + +### View the created database credential password +Command: `terraform output new_database_credential` + +Sample Output: +``` +$ terraform output new_database_credentials +╷ +│ Error: Output "new_database_credentials" not found +│ +│ The output variable requested could not be found in the state file. If you recently added this to your configuration, be sure to run `terraform apply`, since the state +│ won't be updated with new output variables until that command is run. +╵ +macos:database_credential talina.shrotriya$ terraform output new_database_credential +{ + "access" = tolist([ + { + "privileges" = tolist([ + "data_reader", + "data_writer", + ]) + }, + ]) + "audit" = { + "created_at" = "2023-09-28 23:03:39.742677746 +0000 UTC" + "created_by" = "wTQ5WXpeWsNpfXTVOIz12FzqH8Ye7m2p" + "modified_at" = "2023-09-28 23:03:39.742677746 +0000 UTC" + "modified_by" = "wTQ5WXpeWsNpfXTVOIz12FzqH8Ye7m2p" + "version" = 1 + } + "cluster_id" = "c082af14-c244-40da-b54a-669392738569" + "id" = "7ef4675e-513f-4358-a583-ae5c23e6fa67" + "name" = "test_db_user" + "organization_id" = "0783f698-ac58-4018-84a3-31c3b6ef785d" + "password" = "Secret12$#" + "project_id" = "a1d1a971-092e-40d9-a68b-ef705573f3d8" +} +``` + +### Update the database credential password +- Change the password in the terraform.tfvars file. 
+- Execute terraform plan + +Sample Output: +``` +$ terraform plan +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/talina.shrotriya/workspace/terraform-provider-capella +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with +│ published releases. +╵ +capella_database_credential.new_database_credential: Refreshing state... [id=1a92d0cf-6c41-481f-ad10-c843bd7837f1] + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + ~ update in-place + +Terraform will perform the following actions: + + # capella_database_credential.new_database_credential will be updated in-place + ~ resource "capella_database_credential" "new_database_credential" { + ~ access = [ + ~ { + ~ privileges = [ + "data_reader", + - "data_writer", + ] + }, + ] + ~ audit = { + ~ created_at = "2023-10-03 01:12:14.215211005 +0000 UTC" -> (known after apply) + ~ created_by = "osxKeibDiShFFyyqAVNvqWRaWryXBxBD" -> (known after apply) + ~ modified_at = "2023-10-03 01:12:14.215211005 +0000 UTC" -> (known after apply) + ~ modified_by = "osxKeibDiShFFyyqAVNvqWRaWryXBxBD" -> (known after apply) + ~ version = 1 -> (known after apply) + } + id = "1a92d0cf-6c41-481f-ad10-c843bd7837f1" + name = "test_db_user" + ~ password = (sensitive value) + # (3 unchanged attributes hidden) + } + +Plan: 0 to add, 1 to change, 0 to destroy. 
+ +Changes to Outputs: + ~ new_database_credential = (sensitive value) +``` + +- Execute terraform apply +Sample Output: +``` +$ terraform apply +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/talina.shrotriya/workspace/terraform-provider-capella +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with +│ published releases. +╵ +capella_database_credential.new_database_credential: Refreshing state... [id=1a92d0cf-6c41-481f-ad10-c843bd7837f1] + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + ~ update in-place + +Terraform will perform the following actions: + + # capella_database_credential.new_database_credential will be updated in-place + ~ resource "capella_database_credential" "new_database_credential" { + ~ access = [ + ~ { + ~ privileges = [ + "data_reader", + - "data_writer", + ] + }, + ] + ~ audit = { + ~ created_at = "2023-10-03 01:12:14.215211005 +0000 UTC" -> (known after apply) + ~ created_by = "osxKeibDiShFFyyqAVNvqWRaWryXBxBD" -> (known after apply) + ~ modified_at = "2023-10-03 01:12:14.215211005 +0000 UTC" -> (known after apply) + ~ modified_by = "osxKeibDiShFFyyqAVNvqWRaWryXBxBD" -> (known after apply) + ~ version = 1 -> (known after apply) + } + id = "1a92d0cf-6c41-481f-ad10-c843bd7837f1" + name = "test_db_user" + ~ password = (sensitive value) + # (3 unchanged attributes hidden) + } + +Plan: 0 to add, 1 to change, 0 to destroy. + +Changes to Outputs: + ~ new_database_credential = (sensitive value) + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. 
+ + Enter a value: yes + +capella_database_credential.new_database_credential: Modifying... [id=1a92d0cf-6c41-481f-ad10-c843bd7837f1] +capella_database_credential.new_database_credential: Modifications complete after 2s [id=1a92d0cf-6c41-481f-ad10-c843bd7837f1] + +Apply complete! Resources: 0 added, 1 changed, 0 destroyed. + +Outputs: + +new_database_credential = +``` + +- Finally, we can confirm if the password was updated by running the terraform output command. + +``` +$ terraform output new_database_credential +{ + "access" = tolist([ + { + "privileges" = tolist([ + "data_reader", + ]) + "resources" = null /* object */ + }, + ]) + "audit" = { + "created_at" = "2023-10-03 01:12:14.215211005 +0000 UTC" + "created_by" = "osxKeibDiShFFyyqAVNvqWRaWryXBxBD" + "modified_at" = "2023-10-03 01:12:14.215211005 +0000 UTC" + "modified_by" = "osxKeibDiShFFyyqAVNvqWRaWryXBxBD" + "version" = 1 + } + "cluster_id" = "c082af14-c244-40da-b54a-669392738569" + "id" = "1a92d0cf-6c41-481f-ad10-c843bd7837f1" + "name" = "test_db_user" + "organization_id" = "0783f698-ac58-4018-84a3-31c3b6ef785d" + "password" = "NewSecret12$#" + "project_id" = "a1d1a971-092e-40d9-a68b-ef705573f3d8" +} +``` \ No newline at end of file diff --git a/examples/database_credential/create_database_credential.tf b/examples/database_credential/create_database_credential.tf new file mode 100644 index 00000000..476486d3 --- /dev/null +++ b/examples/database_credential/create_database_credential.tf @@ -0,0 +1,18 @@ +output "new_database_credential" { + value = capella_database_credential.new_database_credential + sensitive = true +} + +resource "capella_database_credential" "new_database_credential" { + name = var.database_credential_name + organization_id = var.organization_id + project_id = var.project_id + cluster_id = var.cluster_id + password = var.password + access = [ + { + privileges = ["data_reader", "data_writer"] + } + ] +} + diff --git a/examples/database_credential/main.tf 
b/examples/database_credential/main.tf new file mode 100644 index 00000000..9b86e51a --- /dev/null +++ b/examples/database_credential/main.tf @@ -0,0 +1,13 @@ +terraform { + required_providers { + capella = { + source = "hashicorp.com/couchabasecloud/capella" + } + } +} + +provider "capella" { + host = var.host + authentication_token = var.auth_token +} + diff --git a/examples/database_credential/terraform.template.tfvars b/examples/database_credential/terraform.template.tfvars new file mode 100644 index 00000000..8d8059b6 --- /dev/null +++ b/examples/database_credential/terraform.template.tfvars @@ -0,0 +1,7 @@ +auth_token = "my-secret-key" +organization_id = "0783f698-ac58-4018-84a3-31c3b6ef785d" +project_id = "b9b467e2-cca0-4fc7-ab54-3cc2faba51c9" +host = "https://cloudapi.cloud.couchbase.com" +database_credential_name = "test_db_user" +cluster_id = "546caf5b-f495-45fc-93e5-7a40e0ee2a17" +password = "Secret12$#" diff --git a/examples/database_credential/variables.tf b/examples/database_credential/variables.tf new file mode 100644 index 00000000..0bb90c9d --- /dev/null +++ b/examples/database_credential/variables.tf @@ -0,0 +1,29 @@ +variable "host" { + description = "The Host URL of Couchbase Cloud." 
+} + +variable "organization_id" { + description = "Capella Organization ID" +} + +variable "auth_token" { + description = "Authentication API Key" + sensitive = true +} + +variable "project_id" { + description = "Capella Project ID" +} + +variable "database_credential_name" { + description = "Database Credentials Name" +} + +variable "cluster_id" { + description = "Capella Cluster ID" +} + +variable "password" { + description = "Password for the database credential" + sensitive = true +} diff --git a/examples/organization/README.md b/examples/organization/README.md new file mode 100644 index 00000000..25e894e7 --- /dev/null +++ b/examples/organization/README.md @@ -0,0 +1,210 @@ +# Capella Organization Example + +This example shows how to manage an Organization in Capella. + +This fetches the details of an existing Organization. It uses the organization ID to do so. + +To run, configure your Couchbase Capella provider as described in README in the root of this project. + +# Example Walkthrough + +In this example, we are going to do the following. + +1. Get an existing organization in Capella as stated in the `get_organization.tf` file. + +If you check the `terraform.template.tfvars` file - you can see that we need 3 main variables to run the terraform commands. +Make sure you copy the file to `terraform.tfvars` and update the values of the variables as per the correct organization access. + + +### View the plan for the resources that Terraform will create + +Command: `terraform plan` + +Sample Output: +``` +terraform plan +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/nidhi.kumar/go/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published +│ releases. +╵ +data.capella_organization.existing_organization: Reading... 
+data.capella_organization.existing_organization: Read complete after 1s [name=cbc-dev] + +Changes to Outputs: + + existing_organization = { + + audit = { + + created_at = "2020-07-22 12:38:57.437248116 +0000 UTC" + + created_by = "" + + modified_at = "2023-07-25 14:33:56.13967014 +0000 UTC" + + modified_by = "99b8dc97-b8ae-44af-8ccd-897e3802c3cb" + + version = 0 + } + + description = "" + + name = "cbc-dev" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + + preferences = { + + session_duration = 7200 + } + } + +You can apply this plan to save these new output values to the Terraform state, without changing any real infrastructure. + +───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── + +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now. + + +``` + +### Apply the Plan, in order to get the organization + +Command: `terraform apply` + +Sample Output: +``` +terraform apply +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/nidhi.kumar/go/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published +│ releases. +╵ +data.capella_organization.existing_organization: Reading... +data.capella_organization.existing_organization: Read complete after 1s [name=cbc-dev] + +No changes. Your infrastructure matches the configuration. + +Terraform has compared your real infrastructure against your configuration and found no differences, so no changes are needed. + +Apply complete! Resources: 0 added, 0 changed, 0 destroyed. 
+ +Outputs: + +existing_organization = { + "audit" = { + "created_at" = "2020-07-22 12:38:57.437248116 +0000 UTC" + "created_by" = "" + "modified_at" = "2023-07-25 14:33:56.13967014 +0000 UTC" + "modified_by" = "99b8dc97-b8ae-44af-8ccd-897e3802c3cb" + "version" = 0 + } + "description" = "" + "name" = "cbc-dev" + "organization_id" = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + "preferences" = { + "session_duration" = 7200 + } +} +nidhi.kumar@QFXY6XF4V3 organization % terraform plan +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/nidhi.kumar/go/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published +│ releases. +╵ +data.capella_organization.existing_organization: Reading... +data.capella_organization.existing_organization: Read complete after 1s [name=cbc-dev] + +Changes to Outputs: + + existing_organization = { + + audit = { + + created_at = "2020-07-22 12:38:57.437248116 +0000 UTC" + + created_by = "" + + modified_at = "2023-07-25 14:33:56.13967014 +0000 UTC" + + modified_by = "99b8dc97-b8ae-44af-8ccd-897e3802c3cb" + + version = 0 + } + + description = "" + + name = "cbc-dev" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + + preferences = { + + session_duration = 7200 + } + } + +You can apply this plan to save these new output values to the Terraform state, without changing any real infrastructure. + +───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── + +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now. 
+nidhi.kumar@QFXY6XF4V3 organization % terraform apply +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/nidhi.kumar/go/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published +│ releases. +╵ +data.capella_organization.existing_organization: Reading... +data.capella_organization.existing_organization: Read complete after 1s [name=cbc-dev] + +Changes to Outputs: + + existing_organization = { + + audit = { + + created_at = "2020-07-22 12:38:57.437248116 +0000 UTC" + + created_by = "" + + modified_at = "2023-07-25 14:33:56.13967014 +0000 UTC" + + modified_by = "99b8dc97-b8ae-44af-8ccd-897e3802c3cb" + + version = 0 + } + + description = "" + + name = "cbc-dev" + + organization_id = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + + preferences = { + + session_duration = 7200 + } + } + +You can apply this plan to save these new output values to the Terraform state, without changing any real infrastructure. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + + +Apply complete! Resources: 0 added, 0 changed, 0 destroyed. + +Outputs: + +existing_organization = { + "audit" = { + "created_at" = "2020-07-22 12:38:57.437248116 +0000 UTC" + "created_by" = "" + "modified_at" = "2023-07-25 14:33:56.13967014 +0000 UTC" + "modified_by" = "99b8dc97-b8ae-44af-8ccd-897e3802c3cb" + "version" = 0 + } + "description" = "" + "name" = "cbc-dev" + "organization_id" = "6af08c0a-8cab-4c1c-b257-b521575c16d0" + "preferences" = { + "session_duration" = 7200 + } +} + +``` + +### List the resources that are present in the Terraform State file. 
+ +Command: `terraform state list` + +Sample Output: +``` +$ terraform state list +data.capella_organization.existing_organization +``` diff --git a/examples/organization/get_organization.tf b/examples/organization/get_organization.tf new file mode 100644 index 00000000..5e07bc06 --- /dev/null +++ b/examples/organization/get_organization.tf @@ -0,0 +1,7 @@ +output "existing_organization" { + value = data.capella_organization.existing_organization +} + +data "capella_organization" "existing_organization" { + organization_id = var.organization_id +} diff --git a/examples/organization/main.tf b/examples/organization/main.tf new file mode 100644 index 00000000..0ad7da72 --- /dev/null +++ b/examples/organization/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + capella = { + source = "hashicorp.com/couchabasecloud/capella" + } + } +} + +provider "capella" { + host = var.host + authentication_token = var.auth_token +} diff --git a/examples/organization/terraform.template.tfvars b/examples/organization/terraform.template.tfvars new file mode 100644 index 00000000..5faba0e8 --- /dev/null +++ b/examples/organization/terraform.template.tfvars @@ -0,0 +1,3 @@ +auth_token = "v4-api-key-secret" +organization_id = "" +host = "https://cloudapi.cloud.couchbase.com" \ No newline at end of file diff --git a/examples/organization/variables.tf b/examples/organization/variables.tf new file mode 100644 index 00000000..f55a3099 --- /dev/null +++ b/examples/organization/variables.tf @@ -0,0 +1,12 @@ +variable "host" { + description = "The Host URL of Couchbase Cloud." 
+} + +variable "auth_token" { + description = "Authentication API Key" + sensitive = true +} + +variable "organization_id" { + description = "Capella Organization ID" +} diff --git a/examples/project/README.md b/examples/project/README.md new file mode 100644 index 00000000..b8a74fef --- /dev/null +++ b/examples/project/README.md @@ -0,0 +1,472 @@ +# Capella Projects Example + +This example shows how to create and manage Projects in Capella. + +This creates a new project in the selected Capella organization and lists existing Projects in the organization. It uses the organization ID to create and list Projects. + +To run, configure your Couchbase Capella provider as described in README in the root of this project. + +# Example Walkthrough + +In this example, we are going to do the following. + +1. Create a new project in Capella as stated in the `create_project.tf` file. +2. List existing projects in Capella as stated in the `list_projects.tf` file. +3. Import a project that exists in Capella but not in the terraform state file. +4. Delete the newly created project from Capella. + +If you check the `terraform.template.tfvars` file - you can see that we need 3 main variables to run the terraform commands. +Make sure you copy the file to `terraform.tfvars` and update the values of the variables as per the correct organization access. + + +### View the plan for the resources that Terraform will create + +Command: `terraform plan` + +Sample Output: +``` +$ terraform plan +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/talina.shrotriya/workspace/terraform-provider-capella +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ +data.capella_projects.existing_projects: Reading... 
+data.capella_projects.existing_projects: Read complete after 1s + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # capella_project.new_project will be created + + resource "capella_project" "new_project" { + + audit = { + + created_at = (known after apply) + + created_by = (known after apply) + + modified_at = (known after apply) + + modified_by = (known after apply) + + version = (known after apply) + } + + description = "A Capella Project that will host many Capella clusters." + + etag = (known after apply) + + id = (known after apply) + + name = "terraform-couchbasecapella-project" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + } + +Plan: 1 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + new_project = { + + audit = (known after apply) + + description = "A Capella Project that will host many Capella clusters." 
+ + etag = (known after apply) + + id = (known after apply) + + if_match = null + + name = "terraform-couchbasecapella-project" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + } + + projects_list = { + + data = [ + + { + + audit = { + + created_at = "2023-09-19 20:38:55.873822668 +0000 UTC" + + created_by = "bff4a7f5-33c0-4324-bb40-0890a01a20ae" + + modified_at = "2023-09-19 20:38:55.873836582 +0000 UTC" + + modified_by = "bff4a7f5-33c0-4324-bb40-0890a01a20ae" + + version = 1 + } + + description = "" + + etag = null + + id = "e912ed02-8ac4-403c-a0c5-67c57284a5a4" + + if_match = null + + name = "Tacos" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + }, + ] + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + } + +─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── + +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now. +``` + +### Apply the Plan, in order to create a new Project + +Command: `terraform apply` + +Sample Output: +``` +$ terraform apply +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/talina.shrotriya/workspace/terraform-provider-capella +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ +data.capella_projects.existing_projects: Reading... +data.capella_projects.existing_projects: Read complete after 1s + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # capella_project.new_project will be created + + resource "capella_project" "new_project" { + + audit = { + + created_at = (known after apply) + + created_by = (known after apply) + + modified_at = (known after apply) + + modified_by = (known after apply) + + version = (known after apply) + } + + description = "A Capella Project that will host many Capella clusters." + + etag = (known after apply) + + id = (known after apply) + + name = "terraform-couchbasecapella-project" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + } + +Plan: 1 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + new_project = { + + audit = (known after apply) + + description = "A Capella Project that will host many Capella clusters." + + etag = (known after apply) + + id = (known after apply) + + if_match = null + + name = "terraform-couchbasecapella-project" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + } + + projects_list = { + + data = [ + + { + + audit = { + + created_at = "2023-09-19 20:38:55.873822668 +0000 UTC" + + created_by = "bff4a7f5-33c0-4324-bb40-0890a01a20ae" + + modified_at = "2023-09-19 20:38:55.873836582 +0000 UTC" + + modified_by = "bff4a7f5-33c0-4324-bb40-0890a01a20ae" + + version = 1 + } + + description = "" + + etag = null + + id = "e912ed02-8ac4-403c-a0c5-67c57284a5a4" + + if_match = null + + name = "Tacos" + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + }, + ] + + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + } + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +capella_project.new_project: Creating... +capella_project.new_project: Creation complete after 1s [id=95b69ba0-23f8-45bf-8640-8ea99e8860fd] + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 
+ +Outputs: + +new_project = { + "audit" = { + "created_at" = "2023-09-19 20:39:45.392955893 +0000 UTC" + "created_by" = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + "modified_at" = "2023-09-19 20:39:45.392987613 +0000 UTC" + "modified_by" = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + "version" = 1 + } + "description" = "A Capella Project that will host many Capella clusters." + "etag" = "Version: 1" + "id" = "95b69ba0-23f8-45bf-8640-8ea99e8860fd" + "if_match" = tostring(null) + "name" = "terraform-couchbasecapella-project" + "organization_id" = "bdb8662c-7157-46ea-956f-ed86f4c75211" +} +projects_list = { + "data" = tolist([ + { + "audit" = { + "created_at" = "2023-09-19 20:38:55.873822668 +0000 UTC" + "created_by" = "bff4a7f5-33c0-4324-bb40-0890a01a20ae" + "modified_at" = "2023-09-19 20:38:55.873836582 +0000 UTC" + "modified_by" = "bff4a7f5-33c0-4324-bb40-0890a01a20ae" + "version" = 1 + } + "description" = "" + "etag" = tostring(null) + "id" = "e912ed02-8ac4-403c-a0c5-67c57284a5a4" + "if_match" = tostring(null) + "name" = "Tacos" + "organization_id" = "bdb8662c-7157-46ea-956f-ed86f4c75211" + }, + ]) + "organization_id" = "bdb8662c-7157-46ea-956f-ed86f4c75211" +} +``` + +### Note the Project ID for the new Project +Command: `terraform show` + +Sample Output: +``` +$ terraform show +# capella_project.new_project: +resource "capella_project" "new_project" { + audit = { + created_at = "2023-09-19 20:39:45.392955893 +0000 UTC" + created_by = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + modified_at = "2023-09-19 20:39:45.392987613 +0000 UTC" + modified_by = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + version = 1 + } + description = "A Capella Project that will host many Capella clusters." 
+ etag = "Version: 1" + id = "95b69ba0-23f8-45bf-8640-8ea99e8860fd" + name = "terraform-couchbasecapella-project" + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" +} + +# data.capella_projects.existing_projects: +data "capella_projects" "existing_projects" { + data = [ + # (1 unchanged element hidden) + ] + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" +} + + +Outputs: + +new_project = { + audit = { + created_at = "2023-09-19 20:39:45.392955893 +0000 UTC" + created_by = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + modified_at = "2023-09-19 20:39:45.392987613 +0000 UTC" + modified_by = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + version = 1 + } + description = "A Capella Project that will host many Capella clusters." + etag = "Version: 1" + id = "95b69ba0-23f8-45bf-8640-8ea99e8860fd" + if_match = null + name = "terraform-couchbasecapella-project" + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" +} +projects_list = { + data = [ + { + audit = { + created_at = "2023-09-19 20:38:55.873822668 +0000 UTC" + created_by = "bff4a7f5-33c0-4324-bb40-0890a01a20ae" + modified_at = "2023-09-19 20:38:55.873836582 +0000 UTC" + modified_by = "bff4a7f5-33c0-4324-bb40-0890a01a20ae" + version = 1 + } + description = "" + etag = null + id = "e912ed02-8ac4-403c-a0c5-67c57284a5a4" + if_match = null + name = "Tacos" + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + }, + ] + organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" +} +``` + +In this case, the Project ID for my new project is `95b69ba0-23f8-45bf-8640-8ea99e8860fd` + +### List the resources that are present in the Terraform State file. 
+
+Command: `terraform state list`
+
+Sample Output:
+```
+$ terraform state list
+data.capella_projects.existing_projects
+capella_project.new_project
+```
+
+### Remove the resource `new_project` from the Terraform State file
+
+Command: `terraform state rm capella_project.new_project`
+
+Sample Output:
+```
+$ terraform state rm capella_project.new_project
+Removed capella_project.new_project
+Successfully removed 1 resource instance(s).
+```
+
+Please note, this command will only remove the resource from the Terraform State file, but in reality, the resource exists in Capella.
+
+### Now, let's import the resource in Terraform
+
+Command: `terraform import capella_project.new_project id=<project_id>,organization_id=<organization_id>`
+
+In this case, the complete command is:
+`terraform import capella_project.new_project id=95b69ba0-23f8-45bf-8640-8ea99e8860fd,organization_id=bdb8662c-7157-46ea-956f-ed86f4c75211`
+
+Sample Output:
+```
+$ terraform import capella_project.new_project id=95b69ba0-23f8-45bf-8640-8ea99e8860fd,organization_id=bdb8662c-7157-46ea-956f-ed86f4c75211
+capella_project.new_project: Importing from ID "id=95b69ba0-23f8-45bf-8640-8ea99e8860fd,organization_id=bdb8662c-7157-46ea-956f-ed86f4c75211"...
+data.capella_projects.existing_projects: Reading...
+capella_project.new_project: Import prepared!
+  Prepared capella_project for import
+capella_project.new_project: Refreshing state... [id=id=95b69ba0-23f8-45bf-8640-8ea99e8860fd,organization_id=bdb8662c-7157-46ea-956f-ed86f4c75211]
+data.capella_projects.existing_projects: Read complete after 0s
+
+Import successful!
+
+The resources that were imported are shown above. These resources are now in
+your Terraform state and will henceforth be managed by Terraform.
+```
+
+Here, we pass the IDs as a single comma-separated string.
+The first ID in the string is the project ID, i.e. the ID of the resource that we want to import.
+The second ID is the organization ID, i.e. the ID of the organization to which the project belongs.
+ +### Let's run a terraform plan to confirm that the import was successful and no resource states were impacted + +Command: `terraform plan` + +Sample Output: +``` +$ terraform plan +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/talina.shrotriya/workspace/terraform-provider-capella +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ +data.capella_projects.existing_projects: Reading... +capella_project.new_project: Refreshing state... [id=95b69ba0-23f8-45bf-8640-8ea99e8860fd] +data.capella_projects.existing_projects: Read complete after 1s + +No changes. Your infrastructure matches the configuration. + +Terraform has compared your real infrastructure against your configuration and found no differences, so no changes are needed. +``` + +### Finally, destroy the resources created by Terraform + +Command: `terraform destroy` + +Sample Output: +``` +$ terraform destroy +╷ +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/talina.shrotriya/workspace/terraform-provider-capella +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ +data.capella_projects.existing_projects: Reading... +capella_project.new_project: Refreshing state... [id=95b69ba0-23f8-45bf-8640-8ea99e8860fd] +data.capella_projects.existing_projects: Read complete after 1s + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + - destroy + +Terraform will perform the following actions: + + # capella_project.new_project will be destroyed + - resource "capella_project" "new_project" { + - audit = { + - created_at = "2023-09-19 20:39:45.392955893 +0000 UTC" -> null + - created_by = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" -> null + - modified_at = "2023-09-19 20:39:45.392987613 +0000 UTC" -> null + - modified_by = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" -> null + - version = 1 -> null + } + - description = "A Capella Project that will host many Capella clusters." -> null + - etag = "Version: 1" -> null + - id = "95b69ba0-23f8-45bf-8640-8ea99e8860fd" -> null + - name = "terraform-couchbasecapella-project" -> null + - organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" -> null + } + +Plan: 0 to add, 0 to change, 1 to destroy. + +Changes to Outputs: + - new_project = { + - audit = { + - created_at = "2023-09-19 20:39:45.392955893 +0000 UTC" + - created_by = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + - modified_at = "2023-09-19 20:39:45.392987613 +0000 UTC" + - modified_by = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + - version = 1 + } + - description = "A Capella Project that will host many Capella clusters." + - etag = "Version: 1" + - id = "95b69ba0-23f8-45bf-8640-8ea99e8860fd" + - if_match = null + - name = "terraform-couchbasecapella-project" + - organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + } -> null + - projects_list = { + - data = [ + - { + - audit = { + - created_at = "2023-09-19 20:39:45.392955893 +0000 UTC" + - created_by = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + - modified_at = "2023-09-19 20:39:45.392987613 +0000 UTC" + - modified_by = "7eEPh2Jdzb3fwRNesFoONpyAkq5nhAfK" + - version = 1 + } + - description = "A Capella Project that will host many Capella clusters." 
+ - etag = null + - id = "95b69ba0-23f8-45bf-8640-8ea99e8860fd" + - if_match = null + - name = "terraform-couchbasecapella-project" + - organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + }, + - { + - audit = { + - created_at = "2023-09-19 20:38:55.873822668 +0000 UTC" + - created_by = "bff4a7f5-33c0-4324-bb40-0890a01a20ae" + - modified_at = "2023-09-19 20:38:55.873836582 +0000 UTC" + - modified_by = "bff4a7f5-33c0-4324-bb40-0890a01a20ae" + - version = 1 + } + - description = "" + - etag = null + - id = "e912ed02-8ac4-403c-a0c5-67c57284a5a4" + - if_match = null + - name = "Tacos" + - organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + }, + ] + - organization_id = "bdb8662c-7157-46ea-956f-ed86f4c75211" + } -> null + +Do you really want to destroy all resources? + Terraform will destroy all your managed infrastructure, as shown above. + There is no undo. Only 'yes' will be accepted to confirm. + + Enter a value: yes + +capella_project.new_project: Destroying... [id=95b69ba0-23f8-45bf-8640-8ea99e8860fd] +capella_project.new_project: Destruction complete after 1s + +Destroy complete! Resources: 1 destroyed. +``` \ No newline at end of file diff --git a/examples/project/create_project.tf b/examples/project/create_project.tf new file mode 100644 index 00000000..8bdf39ff --- /dev/null +++ b/examples/project/create_project.tf @@ -0,0 +1,10 @@ +output "new_project" { + value = capella_project.new_project +} + +resource "capella_project" "new_project" { + organization_id = var.organization_id + name = var.project_name + description = "A Capella Project that will host many Capella clusters." 
+} + diff --git a/examples/project/list_projects.tf b/examples/project/list_projects.tf new file mode 100644 index 00000000..1f0b3ade --- /dev/null +++ b/examples/project/list_projects.tf @@ -0,0 +1,7 @@ +output "projects_list" { + value = data.capella_projects.existing_projects +} + +data "capella_projects" "existing_projects" { + organization_id = var.organization_id +} diff --git a/examples/project/main.tf b/examples/project/main.tf new file mode 100644 index 00000000..0ad7da72 --- /dev/null +++ b/examples/project/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + capella = { + source = "hashicorp.com/couchabasecloud/capella" + } + } +} + +provider "capella" { + host = var.host + authentication_token = var.auth_token +} diff --git a/examples/project/terraform.template.tfvars b/examples/project/terraform.template.tfvars new file mode 100644 index 00000000..5faba0e8 --- /dev/null +++ b/examples/project/terraform.template.tfvars @@ -0,0 +1,3 @@ +auth_token = "v4-api-key-secret" +organization_id = "" +host = "https://cloudapi.cloud.couchbase.com" \ No newline at end of file diff --git a/examples/project/variables.tf b/examples/project/variables.tf new file mode 100644 index 00000000..0f391c9d --- /dev/null +++ b/examples/project/variables.tf @@ -0,0 +1,17 @@ +variable "host" { + description = "The Host URL of Couchbase Cloud." +} + +variable "organization_id" { + description = "Capella Organization ID" +} + +variable "auth_token" { + description = "Authentication API Key" + sensitive = true +} + +variable "project_name" { + default = "terraform-couchbasecapella-project" + description = "Project Name for Project Created via Terraform" +} \ No newline at end of file diff --git a/examples/user/README.md b/examples/user/README.md new file mode 100644 index 00000000..501a3b2b --- /dev/null +++ b/examples/user/README.md @@ -0,0 +1,693 @@ +# Capella User Example + +This example shows how to create and manage users in Capella. 
+ +This creates a new user in the selected Capella project. It uses the organization ID and projectId to do so. + +An invitation email is triggered and sent to the user. Upon receiving the invitation email, the user is required to click on a provided URL, which will redirect them to a page with a user interface (UI) where they can set their username and password. + +The modification of any personal information related to a user can only be performed by the user through the UI. Similarly, the user can solely conduct password updates through the UI. + +The "caller" possessing Organization Owner access rights retains the exclusive user creation capability. They hold the authority to assign roles at the organization and project levels. + +At present, our support is limited to the capella resourceType of "project" exclusively. + +To run, configure your Couchbase Capella provider as described in README in the root of this project. + +# Example Walkthrough +### View the plan for the resources that Terraform will create + +Command: +``` +terraform plan +``` +Sample Output: +``` + +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/mattymaclean/go/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become +│ incompatible with published releases. +╵ +data.capella_users.existing_users: Reading... +data.capella_users.existing_users: Read complete after 0s + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the +following symbols: + + create + +Terraform will perform the following actions: + + # capella_user.new_user will be created + + resource "capella_user" "new_user" { + + audit = (known after apply) + + email = "matty.maclean+2@couchbase.com" + + enable_notifications = (known after apply) + + expires_at = (known after apply) + + id = (known after apply) + + inactive = (known after apply) + + last_login = (known after apply) + + name = "Matty" + + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + + organization_roles = [ + + "projectCreator", + ] + + region = (known after apply) + + resources = [ + + { + + id = "81f7bd87-6e62-4c7f-9a7e-be231c74b538" + + roles = [ + + "projectDataReaderWriter", + ] + + type = "project" + }, + ] + + status = (known after apply) + + time_zone = (known after apply) + } + +Plan: 1 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + new_user = { + + audit = (known after apply) + + email = "matty.maclean+2@couchbase.com" + + enable_notifications = (known after apply) + + expires_at = (known after apply) + + id = (known after apply) + + inactive = (known after apply) + + last_login = (known after apply) + + name = "Matty" + + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + + organization_roles = [ + + "projectCreator", + ] + + region = (known after apply) + + resources = [ + + { + + id = "81f7bd87-6e62-4c7f-9a7e-be231c74b538" + + roles = [ + + "projectDataReaderWriter", + ] + + type = "project" + }, + ] + + status = (known after apply) + + time_zone = (known after apply) + } + + users_list = { + + data = [ + + { + + audit = { + + created_at = "2023-10-06 15:43:02.805868342 +0000 UTC" + + created_by = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + + modified_at = "2023-10-06 15:43:02.805868342 +0000 UTC" + + modified_by = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + + version = 1 + } + + email = "matty.maclean@couchbase.com" + + enable_notifications = false + + expires_at = 
"2024-01-04T15:43:02.805868342Z" + + id = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + + inactive = false + + last_login = "2023-10-06T15:47:57.491646422Z" + + name = "matty.maclean" + + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + + organization_roles = [ + + "organizationOwner", + ] + + region = "" + + resources = null + + status = "verified" + + time_zone = "" + }, + ] + + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + } + +────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── + +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run +"terraform apply" now. +``` + +### Apply the Plan, in order to create a new User in Capella +Command: +``` +terraform apply +``` + +Sample Output: +``` +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/mattymaclean/go/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ +data.capella_users.existing_users: Reading... +data.capella_users.existing_users: Read complete after 0s + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # capella_user.new_user will be created + + resource "capella_user" "new_user" { + + audit = (known after apply) + + email = "matty.maclean+2@couchbase.com" + + enable_notifications = (known after apply) + + expires_at = (known after apply) + + id = (known after apply) + + inactive = (known after apply) + + last_login = (known after apply) + + name = "Matty" + + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + + organization_roles = [ + + "projectCreator", + ] + + region = (known after apply) + + resources = [ + + { + + id = "81f7bd87-6e62-4c7f-9a7e-be231c74b538" + + roles = [ + + "projectDataReaderWriter", + ] + + type = "project" + }, + ] + + status = (known after apply) + + time_zone = (known after apply) + } + +Plan: 1 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + new_user = { + + audit = (known after apply) + + email = "matty.maclean+2@couchbase.com" + + enable_notifications = (known after apply) + + expires_at = (known after apply) + + id = (known after apply) + + inactive = (known after apply) + + last_login = (known after apply) + + name = "Matty" + + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + + organization_roles = [ + + "projectCreator", + ] + + region = (known after apply) + + resources = [ + + { + + id = "81f7bd87-6e62-4c7f-9a7e-be231c74b538" + + roles = [ + + "projectDataReaderWriter", + ] + + type = "project" + }, + ] + + status = (known after apply) + + time_zone = (known after apply) + } + + users_list = { + + data = [ + + { + + audit = { + + created_at = "2023-10-06 15:43:02.805868342 +0000 UTC" + + created_by = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + + modified_at = "2023-10-06 15:43:02.805868342 +0000 UTC" + + modified_by = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + + version = 1 + } + + email = "matty.maclean@couchbase.com" + + enable_notifications = false + + expires_at = 
"2024-01-04T15:43:02.805868342Z" + + id = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + + inactive = false + + last_login = "2023-10-06T15:47:57.491646422Z" + + name = "matty.maclean" + + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + + organization_roles = [ + + "organizationOwner", + ] + + region = "" + + resources = null + + status = "verified" + + time_zone = "" + }, + ] + + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + } + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +capella_user.new_user: Creating... +capella_user.new_user: Creation complete after 1s [id=9ddcf5d2-901e-457c-9d62-4709ef0eb46d] + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +Outputs: + +new_user = { + "audit" = { + "created_at" = "2023-10-06 16:05:06.620419302 +0000 UTC" + "created_by" = "9ddcf5d2-901e-457c-9d62-4709ef0eb46d" + "modified_at" = "2023-10-06 16:05:06.620419302 +0000 UTC" + "modified_by" = "9ddcf5d2-901e-457c-9d62-4709ef0eb46d" + "version" = 1 + } + "email" = "matty.maclean+2@couchbase.com" + "enable_notifications" = false + "expires_at" = "2024-01-04T16:05:06.620419427Z" + "id" = "9ddcf5d2-901e-457c-9d62-4709ef0eb46d" + "inactive" = true + "last_login" = "" + "name" = "Matty" + "organization_id" = "93f13778-3d11-43c5-861f-417a4b00ba81" + "organization_roles" = tolist([ + "projectCreator", + ]) + "region" = "" + "resources" = tolist([ + { + "id" = "81f7bd87-6e62-4c7f-9a7e-be231c74b538" + "roles" = tolist([ + "projectDataReaderWriter", + ]) + "type" = "project" + }, + ]) + "status" = "not-verified" + "time_zone" = "" +} +users_list = { + "data" = tolist([ + { + "audit" = { + "created_at" = "2023-10-06 15:43:02.805868342 +0000 UTC" + "created_by" = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + "modified_at" = "2023-10-06 15:43:02.805868342 +0000 UTC" + "modified_by" = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + "version" = 1 + } + "email" = 
"matty.maclean@couchbase.com" + "enable_notifications" = false + "expires_at" = "2024-01-04T15:43:02.805868342Z" + "id" = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + "inactive" = false + "last_login" = "2023-10-06T15:47:57.491646422Z" + "name" = "matty.maclean" + "organization_id" = "93f13778-3d11-43c5-861f-417a4b00ba81" + "organization_roles" = tolist([ + "organizationOwner", + ]) + "region" = "" + "resources" = tolist(null) /* of object */ + "status" = "verified" + "time_zone" = "" + }, + ]) + "organization_id" = "93f13778-3d11-43c5-861f-417a4b00ba81" +} +``` +### Note the User ID for the new User +Command: +``` +terraform show +``` + +Sample Output: +``` +# data.capella_users.existing_users: +data "capella_users" "existing_users" { + data = [ + { + audit = { + created_at = "2023-10-06 15:43:02.805868342 +0000 UTC" + created_by = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + modified_at = "2023-10-06 15:43:02.805868342 +0000 UTC" + modified_by = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + version = 1 + } + email = "matty.maclean@couchbase.com" + enable_notifications = false + expires_at = "2024-01-04T15:43:02.805868342Z" + id = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + inactive = false + last_login = "2023-10-06T15:47:57.491646422Z" + name = "matty.maclean" + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + organization_roles = [ + "organizationOwner", + ] + region = "" + status = "verified" + time_zone = "" + }, + ] + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" +} + +# capella_user.new_user: +resource "capella_user" "new_user" { + audit = { + created_at = "2023-10-06 16:05:06.620419302 +0000 UTC" + created_by = "9ddcf5d2-901e-457c-9d62-4709ef0eb46d" + modified_at = "2023-10-06 16:05:06.620419302 +0000 UTC" + modified_by = "9ddcf5d2-901e-457c-9d62-4709ef0eb46d" + version = 1 + } + email = "matty.maclean+2@couchbase.com" + enable_notifications = false + expires_at = "2024-01-04T16:05:06.620419427Z" + id = "9ddcf5d2-901e-457c-9d62-4709ef0eb46d" + 
inactive = true + name = "Matty" + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + organization_roles = [ + "projectCreator", + ] + resources = [ + { + id = "81f7bd87-6e62-4c7f-9a7e-be231c74b538" + roles = [ + "projectDataReaderWriter", + ] + type = "project" + }, + ] + status = "not-verified" +} + + +Outputs: + +new_user = { + audit = { + created_at = "2023-10-06 16:05:06.620419302 +0000 UTC" + created_by = "9ddcf5d2-901e-457c-9d62-4709ef0eb46d" + modified_at = "2023-10-06 16:05:06.620419302 +0000 UTC" + modified_by = "9ddcf5d2-901e-457c-9d62-4709ef0eb46d" + version = 1 + } + email = "matty.maclean+2@couchbase.com" + enable_notifications = false + expires_at = "2024-01-04T16:05:06.620419427Z" + id = "9ddcf5d2-901e-457c-9d62-4709ef0eb46d" + inactive = true + last_login = "" + name = "Matty" + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + organization_roles = [ + "projectCreator", + ] + region = "" + resources = [ + { + id = "81f7bd87-6e62-4c7f-9a7e-be231c74b538" + roles = [ + "projectDataReaderWriter", + ] + type = "project" + }, + ] + status = "not-verified" + time_zone = "" +} +users_list = { + data = [ + { + audit = { + created_at = "2023-10-06 15:43:02.805868342 +0000 UTC" + created_by = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + modified_at = "2023-10-06 15:43:02.805868342 +0000 UTC" + modified_by = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + version = 1 + } + email = "matty.maclean@couchbase.com" + enable_notifications = false + expires_at = "2024-01-04T15:43:02.805868342Z" + id = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + inactive = false + last_login = "2023-10-06T15:47:57.491646422Z" + name = "matty.maclean" + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + organization_roles = [ + "organizationOwner", + ] + region = "" + status = "verified" + time_zone = "" + }, + ] + organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" +} +``` +### List the resources that are present in the Terraform State file. 
+
+Command:
+```
+terraform state list
+```
+
+Sample Output:
+```
+data.capella_users.existing_users
+capella_user.new_user
+```
+### Remove the resource `new_user` from the Terraform State file
+
+Command: `terraform state rm capella_user.new_user`
+
+Sample Output:
+```
+Removed capella_user.new_user
+Successfully removed 1 resource instance(s).
+```
+
+Please note, this command will only remove the resource from the Terraform State file, but in reality, the resource exists in Capella.
+### Now, let's import the resource in Terraform
+Command: `terraform import capella_user.new_user id=<user_id>,organization_id=<organization_id>`
+
+In this case, the complete command is:
+`terraform import capella_user.new_user id=47c321f7-571c-46bb-ac1f-146aa5aec314,organization_id=93f13778-3d11-43c5-861f-417a4b00ba81`
+Sample Output:
+```
+var.user_email
+  Email address of the user
+
+  Enter a value: matty.maclean+2@couchbase.com
+
+var.user_name
+  Name of the user
+
+  Enter a value: matty
+
+capella_user.new_user: Importing from ID "id=47c321f7-571c-46bb-ac1f-146aa5aec314,organization_id=93f13778-3d11-43c5-861f-417a4b00ba81"...
+capella_user.new_user: Import prepared!
+  Prepared capella_user for import
+data.capella_users.existing_users: Reading...
+capella_user.new_user: Refreshing state... [id=id=47c321f7-571c-46bb-ac1f-146aa5aec314,organization_id=93f13778-3d11-43c5-861f-417a4b00ba81]
+data.capella_users.existing_users: Read complete after 0s
+
+Import successful!
+
+The resources that were imported are shown above. These resources are now in
+your Terraform state and will henceforth be managed by Terraform.
+``` +### Let's run a terraform plan to confirm that the import was successful and no resource states were impacted +Command: +``` +terraform plan +``` + +Sample Output: +``` +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/mattymaclean/go/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ +data.capella_users.existing_users: Reading... +capella_user.new_user: Refreshing state... [id=47c321f7-571c-46bb-ac1f-146aa5aec314] +data.capella_users.existing_users: Read complete after 0s + +No changes. Your infrastructure matches the configuration. + +Terraform has compared your real infrastructure against your configuration and found no differences, so no changes are needed. +``` + +### Finally, destroy the resources created by Terraform +Command: +``` +terraform destroy +``` + +Sample Output: +``` +│ Warning: Provider development overrides are in effect +│ +│ The following provider development overrides are set in the CLI configuration: +│ - hashicorp.com/couchabasecloud/capella in /Users/mattymaclean/go/bin +│ +│ The behavior may therefore not match any released version of the provider and applying changes may cause the state to become incompatible with published releases. +╵ +data.capella_users.existing_users: Reading... +capella_user.new_user: Refreshing state... [id=47c321f7-571c-46bb-ac1f-146aa5aec314] +data.capella_users.existing_users: Read complete after 0s + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + - destroy + +Terraform will perform the following actions: + + # capella_user.new_user will be destroyed + - resource "capella_user" "new_user" { + - audit = { + - created_at = "2023-10-06 16:19:55.734127171 +0000 UTC" -> null + - created_by = "47c321f7-571c-46bb-ac1f-146aa5aec314" -> null + - modified_at = "2023-10-06 16:19:55.734127171 +0000 UTC" -> null + - modified_by = "47c321f7-571c-46bb-ac1f-146aa5aec314" -> null + - version = 1 -> null + } -> null + - email = "matty.maclean+2@couchbase.com" -> null + - enable_notifications = false -> null + - expires_at = "2024-01-04T16:19:55.734127296Z" -> null + - id = "47c321f7-571c-46bb-ac1f-146aa5aec314" -> null + - inactive = true -> null + - name = "Matty" -> null + - organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" -> null + - organization_roles = [ + - "projectCreator", + ] -> null + - resources = [ + - { + - id = "81f7bd87-6e62-4c7f-9a7e-be231c74b538" -> null + - roles = [ + - "projectDataReaderWriter", + ] -> null + - type = "project" -> null + }, + ] -> null + - status = "not-verified" -> null + } + +Plan: 0 to add, 0 to change, 1 to destroy. 
+ +Changes to Outputs: + - new_user = { + - audit = { + - created_at = "2023-10-06 16:19:55.734127171 +0000 UTC" + - created_by = "47c321f7-571c-46bb-ac1f-146aa5aec314" + - modified_at = "2023-10-06 16:19:55.734127171 +0000 UTC" + - modified_by = "47c321f7-571c-46bb-ac1f-146aa5aec314" + - version = 1 + } + - email = "matty.maclean+2@couchbase.com" + - enable_notifications = false + - expires_at = "2024-01-04T16:19:55.734127296Z" + - id = "47c321f7-571c-46bb-ac1f-146aa5aec314" + - inactive = true + - last_login = "" + - name = "Matty" + - organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + - organization_roles = [ + - "projectCreator", + ] + - region = "" + - resources = [ + - { + - id = "81f7bd87-6e62-4c7f-9a7e-be231c74b538" + - roles = [ + - "projectDataReaderWriter", + ] + - type = "project" + }, + ] + - status = "not-verified" + - time_zone = "" + } -> null + - users_list = { + - data = [ + - { + - audit = { + - created_at = "2023-10-06 15:43:02.805868342 +0000 UTC" + - created_by = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + - modified_at = "2023-10-06 15:43:02.805868342 +0000 UTC" + - modified_by = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + - version = 1 + } + - email = "matty.maclean@couchbase.com" + - enable_notifications = false + - expires_at = "2024-01-04T15:43:02.805868342Z" + - id = "a1acd4c3-5604-4050-80a5-58d4886e75b6" + - inactive = false + - last_login = "2023-10-06T15:47:57.491646422Z" + - name = "matty.maclean" + - organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + - organization_roles = [ + - "organizationOwner", + ] + - region = "" + - resources = null + - status = "verified" + - time_zone = "" + }, + - { + - audit = { + - created_at = "2023-10-06 16:19:55.734127171 +0000 UTC" + - created_by = "47c321f7-571c-46bb-ac1f-146aa5aec314" + - modified_at = "2023-10-06 16:19:55.734127171 +0000 UTC" + - modified_by = "47c321f7-571c-46bb-ac1f-146aa5aec314" + - version = 1 + } + - email = "matty.maclean+2@couchbase.com" + - enable_notifications = 
false + - expires_at = "2024-01-04T16:19:55.734127296Z" + - id = "47c321f7-571c-46bb-ac1f-146aa5aec314" + - inactive = true + - last_login = "" + - name = "Matty" + - organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + - organization_roles = [ + - "projectCreator", + ] + - region = "" + - resources = [ + - { + - id = "81f7bd87-6e62-4c7f-9a7e-be231c74b538" + - roles = [ + - "projectDataReaderWriter", + ] + - type = "project" + }, + ] + - status = "not-verified" + - time_zone = "" + }, + ] + - organization_id = "93f13778-3d11-43c5-861f-417a4b00ba81" + } -> null + +Do you really want to destroy all resources? + Terraform will destroy all your managed infrastructure, as shown above. + There is no undo. Only 'yes' will be accepted to confirm. + + Enter a value: yes + +capella_user.new_user: Destroying... [id=47c321f7-571c-46bb-ac1f-146aa5aec314] +capella_user.new_user: Destruction complete after 0s + +Destroy complete! Resources: 1 destroyed. +``` diff --git a/examples/user/create_user.tf b/examples/user/create_user.tf new file mode 100644 index 00000000..6f395d8e --- /dev/null +++ b/examples/user/create_user.tf @@ -0,0 +1,17 @@ +output "new_user" { + value = capella_user.new_user +} + +resource "capella_user" "new_user" { + organization_id = var.organization_id + name = var.user_name + email = var.user_email + organization_roles = var.org_roles + resources = [ + { + type = "project" + id = var.project_id + roles = var.project_roles + } + ] +} diff --git a/examples/user/list_users.tf b/examples/user/list_users.tf new file mode 100644 index 00000000..c8fef87a --- /dev/null +++ b/examples/user/list_users.tf @@ -0,0 +1,7 @@ +output "users_list" { + value = data.capella_users.existing_users +} + +data "capella_users" "existing_users" { + organization_id = var.organization_id +} diff --git a/examples/user/main.tf b/examples/user/main.tf new file mode 100644 index 00000000..0ad7da72 --- /dev/null +++ b/examples/user/main.tf @@ -0,0 +1,12 @@ +terraform { + 
required_providers { + capella = { + source = "hashicorp.com/couchabasecloud/capella" + } + } +} + +provider "capella" { + host = var.host + authentication_token = var.auth_token +} diff --git a/examples/user/terraform.template.tfvars b/examples/user/terraform.template.tfvars new file mode 100644 index 00000000..27b5940d --- /dev/null +++ b/examples/user/terraform.template.tfvars @@ -0,0 +1,9 @@ +auth_token = "v4-api-key-secret" +organization_id = "" +project_id = "" +host = "https://cloudapi.cloud.couchbase.com" + +user_name = "John" +user_email = "john.doe@example.com" +org_roles = ["projectCreator"] +project_roles = ["projectDataReaderWriter"] diff --git a/examples/user/variables.tf b/examples/user/variables.tf new file mode 100644 index 00000000..2e52da28 --- /dev/null +++ b/examples/user/variables.tf @@ -0,0 +1,38 @@ +variable "host" { + description = "The Host URL of Couchbase Cloud." +} + +variable "organization_id" { + description = "Capella Organization ID" +} + +variable "project_id" { + description = "Capella Project ID" +} + +variable "auth_token" { + description = "Authentication API Key" + sensitive = true +} + +variable "user_name" { + description = "Name of the user" + type = string +} + +variable "user_email" { + description = "Email address of the user" + type = string +} + +variable "org_roles" { + description = "Roles of the user within the organization" + type = list(string) + default = [] +} + +variable "project_roles" { + description = "Roles of the user within the project" + type = list(string) + default = [] +} diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..d535f06c --- /dev/null +++ b/go.mod @@ -0,0 +1,38 @@ +module terraform-provider-capella + +go 1.20 + +require ( + github.com/google/uuid v1.3.1 + github.com/hashicorp/terraform-plugin-framework v1.4.0 + github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 + github.com/hashicorp/terraform-plugin-log v0.9.0 + github.com/stretchr/testify v1.7.2 +) + +require 
( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-plugin v1.5.1 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/terraform-plugin-go v0.19.0 // indirect + github.com/hashicorp/terraform-registry-address v0.2.2 // indirect + github.com/hashicorp/terraform-svchost v0.1.1 // indirect + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/oklog/run v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + golang.org/x/net v0.13.0 // indirect + golang.org/x/sys v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/grpc v1.57.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..303cb8c2 --- /dev/null +++ b/go.sum @@ -0,0 +1,82 @@ +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-plugin v1.5.1 h1:oGm7cWBaYIp3lJpx1RUEfLWophprE2EV/KUeqBYo+6k= +github.com/hashicorp/go-plugin v1.5.1/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/terraform-plugin-framework v1.4.0 h1:WKbtCRtNrjsh10eA7NZvC/Qyr7zp77j+D21aDO5th9c= +github.com/hashicorp/terraform-plugin-framework v1.4.0/go.mod h1:XC0hPcQbBvlbxwmjxuV/8sn8SbZRg4XwGMs22f+kqV0= +github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= +github.com/hashicorp/terraform-plugin-framework-validators v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg= +github.com/hashicorp/terraform-plugin-go v0.19.0 h1:BuZx/6Cp+lkmiG0cOBk6Zps0Cb2tmqQpDM3iAtnhDQU= +github.com/hashicorp/terraform-plugin-go v0.19.0/go.mod h1:EhRSkEPNoylLQntYsk5KrDHTZJh9HQoumZXbOGOXmec= +github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= +github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= 
+github.com/hashicorp/terraform-registry-address v0.2.2 h1:lPQBg403El8PPicg/qONZJDC6YlgCVbWDtNmmZKtBno= +github.com/hashicorp/terraform-registry-address v0.2.2/go.mod h1:LtwNbCihUoUZ3RYriyS2wF/lGPB6gF9ICLRtuDk7hSo= +github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= +github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= +golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/api/allowlist.go b/internal/api/allowlist.go new file mode 100644 index 00000000..c9d29a9d --- /dev/null +++ b/internal/api/allowlist.go @@ -0,0 +1,46 @@ +package api + +import ( + "github.com/google/uuid" +) + +// CreateAllowListRequest defines the model for CreateAllowListRequest. +type CreateAllowListRequest struct { + // Cidr is the trusted CIDR to allow database connections from. + Cidr string `json:"cidr"` + + // Comment is a short description of the allowed CIDR. + Comment string `json:"comment,omitempty"` + + // ExpiresAt is an RFC3339 timestamp determining when the allowed CIDR should expire. 
+ ExpiresAt string `json:"expiresAt,omitempty"` +} + +// CreateAllowListResponse defines the model for CreateAllowListResponse. +type CreateAllowListResponse struct { + // ID is the ID of the AllowList + Id uuid.UUID `json:"id"` +} + +// GetAllowListResponse defines the model for GetAllowListResponse. +type GetAllowListResponse struct { + // Audit contains all audit-related fields. + Audit CouchbaseAuditData `json:"audit"` + + // Cidr is the trusted CIDR to allow database connections from. + Cidr string `json:"cidr"` + + // Comment is a short description of the allowed CIDR. + Comment *string `json:"comment"` + + // ExpiresAt is an RFC3339 timestamp determining when the allowed CIDR should expire. + ExpiresAt *string `json:"expiresAt"` + + // ID is the ID of the AllowList + Id uuid.UUID `json:"id"` +} + +// GetAllowListsReponse defines the model for GetAllowListsResponse +type GetAllowListsResponse struct { + Data []GetAllowListResponse `json:"data"` +} diff --git a/internal/api/apikey.go b/internal/api/apikey.go new file mode 100644 index 00000000..f30a912b --- /dev/null +++ b/internal/api/apikey.go @@ -0,0 +1,106 @@ +package api + +import ( + "github.com/google/uuid" +) + +// Resources are the resource level permissions associated with the API key. +// To learn more about Organization Roles, see +// [Organization Roles](https://docs.couchbase.com/cloud/organizations/organization-user-roles.html). +type Resources = []ResourcesItems + +// ResourcesItems defines model for APIKeyResourcesItems. +type ResourcesItems struct { + // Id is the id of the project. + Id uuid.UUID `json:"id"` + + // Roles are the project roles associated with the API key. + // To learn more about Project Roles, see + //[Project Roles](https://docs.couchbase.com/cloud/projects/project-roles.html). + Roles []string `json:"roles"` + + // Type is the type of the resource. + Type *string `json:"type,omitempty"` +} + +// CreateApiKeyRequest defines model for CreateAPIKeyRequest. 
+type CreateApiKeyRequest struct { + // AllowedCIDRs is the list of inbound CIDRs for the API key. + // The system making a request must come from one of the allowed CIDRs. + AllowedCIDRs *[]string `json:"allowedCIDRs,omitempty"` + + // Description is the description for the API key. + Description *string `json:"description,omitempty"` + + // Expiry is the expiry of the API key in number of days. + // If set to -1, the token will not expire. + Expiry *float32 `json:"expiry,omitempty"` + + // Name is the name of the API key. + Name string `json:"name"` + + // OrganizationRoles are the organization level roles granted to the API key. + OrganizationRoles []string `json:"organizationRoles"` + + // Resources are the resource level permissions associated with the API key. + // To learn more about Organization Roles, see + // [Organization Roles](https://docs.couchbase.com/cloud/organizations/organization-user-roles.html). + Resources *Resources `json:"resources,omitempty"` +} + +// CreateApiKeyResponse defines model for CreateAPIKeyResponse. +type CreateApiKeyResponse struct { + // Id The id is a unique identifier for an apiKey. + Id string `json:"id"` + + // Token The Token is a confidential piece of information that is used to authorize requests made to v4 endpoints. + Token string `json:"token"` +} + +// GetApiKeyResponse defines model for GetAPIKey. +type GetApiKeyResponse struct { + // AllowedCIDRs is the list of inbound CIDRs for the API key. + // The system making a request must come from one of the allowed CIDRs. + AllowedCIDRs []string `json:"allowedCIDRs"` + Audit CouchbaseAuditData `json:"audit"` + + // Description is the description for the API key. + Description string `json:"description"` + + // Expiry is the expiry of the API key in number of days. + // If set to -1, the token will not expire. + Expiry float32 `json:"expiry"` + + // Id is the id is a unique identifier for an apiKey. + Id string `json:"id"` + + // Name is the name of the API key. 
+ Name string `json:"name"` + + // OrganizationRoles are the organization level roles granted to the API key. + OrganizationRoles []string `json:"organizationRoles"` + + // Resources is the resources are the resource level permissions + // associated with the API key. To learn more about Organization Roles, see + // [Organization Roles](https://docs.couchbase.com/cloud/organizations/organization-user-roles.html). + Resources Resources `json:"resources"` +} + +// RotateAPIKeyRequest defines model for RotateAPIKeyRequest. +type RotateAPIKeyRequest struct { + // Secret represents the secret associated with an API key. One has to follow the secret key policy, such as allowed characters and a length of 64 characters. + // If this field is left empty, a secret will be auto-generated. + Secret *string `json:"secret,omitempty"` +} + +// RotateAPIKeyResponse defines model for RotateAPIKeyResponse. +type RotateAPIKeyResponse struct { + // SecretKey is a confidential token that is paired with the Access key. + // The API key is made of an Access key and a Secret key. + SecretKey string `json:"secretKey"` +} + +// GetApiKeysResponse defines the model for a GetApiKeysResponse. +type GetApiKeysResponse struct { + Data []GetApiKeyResponse `json:"data"` +} diff --git a/internal/api/audit.go b/internal/api/audit.go new file mode 100644 index 00000000..c4a030e0 --- /dev/null +++ b/internal/api/audit.go @@ -0,0 +1,27 @@ +package api + +import "time" + +// CouchbaseAuditData contains all audit-related fields. +type CouchbaseAuditData struct { + // CreatedAt The RFC3339 timestamp associated with when the resource was initially + // created. + CreatedAt time.Time `json:"createdAt"` + + // CreatedBy The user who created the resource; this will be a UUID4 ID for standard + // users and will be a string such as "internal-support" for internal + // Couchbase support users. 
+ CreatedBy string `json:"createdBy"` + + // ModifiedAt The RFC3339 timestamp associated with when the resource was last modified. + ModifiedAt time.Time `json:"modifiedAt"` + + // ModifiedBy The user who last modified the resource; this will be a UUID4 ID for + // standard users and wilmal be a string such as "internal-support" for + // internal Couchbase support users. + ModifiedBy string `json:"modifiedBy"` + + // Version The version of the document. This value is incremented each time the + // resource is modified. + Version int `json:"version"` +} diff --git a/internal/api/bucket/bucket.go b/internal/api/bucket/bucket.go new file mode 100644 index 00000000..138868c8 --- /dev/null +++ b/internal/api/bucket/bucket.go @@ -0,0 +1,80 @@ +package bucket + +type CreateBucketRequest struct { + + // Name is the name of the cluster (up to 100 characters). + Name string `json:"name"` + + // Type represents the Bucket Type + Type string `json:"type"` + + // StorageBackend represents the storage engine used for the bucket. + StorageBackend string `json:"storageBackend"` + + // MemoryAllocationInMb is the amount of memory to allocate for the bucket memory in MiB + MemoryAllocationInMb int `json:"memoryAllocationInMb"` + + BucketConflictResolution string `json:"bucketConflictResolution"` + + DurabilityLevel string `json:"durabilityLevel"` + + Replicas int `json:"replicas"` + + Flush bool `json:"flush"` + + TimeToLiveInSeconds int `json:"timeToLiveInSeconds"` + + EvictionPolicy string `json:"evictionPolicy"` +} + +// CreateBucketResponse defines model for CreateBucketResponse. +type CreateBucketResponse struct { + // Id The ID of the bucket created. + Id string `json:"id"` +} + +type GetBucketResponse struct { + + // Id is the ID of the bucket created. + Id string `json:"id"` + + // Name is the name of the cluster (up to 100 characters). 
+ Name string `json:"name"` + + // Type represents the Bucket Type + Type string `json:"type"` + + // StorageBackend represents the storage engine used for the bucket. + StorageBackend string `json:"storageBackend"` + + // MemoryAllocationInMb is the amount of memory to allocate for the bucket memory in MiB + MemoryAllocationInMb int `json:"memoryAllocationInMb"` + + BucketConflictResolution string `json:"bucketConflictResolution"` + + DurabilityLevel string `json:"durabilityLevel"` + + Replicas int `json:"replicas"` + + Flush bool `json:"flush"` + + TimeToLiveInSeconds int `json:"timeToLiveInSeconds"` + + EvictionPolicy string `json:"evictionPolicy"` + + Stats *Stats `json:"stats"` +} + +// PutBucketRequest defines model for PutBucketRequest. +type PutBucketRequest struct { + // MemoryAllocationInMb is the amount of memory to allocate for the bucket memory in MiB + MemoryAllocationInMb int `json:"memoryAllocationInMb"` + + DurabilityLevel string `json:"durabilityLevel"` + + Replicas int `json:"replicas"` + + Flush bool `json:"flush"` + + TimeToLiveInSeconds int `json:"timeToLiveInSeconds"` +} diff --git a/internal/api/bucket/stats.go b/internal/api/bucket/stats.go new file mode 100644 index 00000000..6376951a --- /dev/null +++ b/internal/api/bucket/stats.go @@ -0,0 +1,8 @@ +package bucket + +type Stats struct { + ItemCount int `json:"itemCount"` + OpsPerSecond int `json:"opsPerSecond"` + DiskUsedInMib int `json:"diskUsedInMib"` + MemoryUsedInMib int `json:"memoryUsedInMib"` +} diff --git a/internal/api/certificate.go b/internal/api/certificate.go new file mode 100644 index 00000000..d143a255 --- /dev/null +++ b/internal/api/certificate.go @@ -0,0 +1,7 @@ +package api + +// GetCertificateResponse defines model for GetCertificateResponse. 
+type GetCertificateResponse struct { + // Certificate is the certificate of the capella cluster + Certificate string `json:"certificate"` +} diff --git a/internal/api/client.go b/internal/api/client.go new file mode 100644 index 00000000..5899912b --- /dev/null +++ b/internal/api/client.go @@ -0,0 +1,78 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "terraform-provider-capella/internal/errors" +) + +// Client is responsible for constructing and executing HTTP requests. +type Client struct { + *http.Client +} + +// NewClient instantiates a new Client. +func NewClient(timeout time.Duration) *Client { + return &Client{ + Client: &http.Client{ + Timeout: timeout, + }, + } +} + +// Response struct is used to encapsulate the response details +type Response struct { + Response *http.Response + Body []byte +} + +// Execute is used to construct and execute a HTTP request. +// It then returns the response. +func (c *Client) Execute(url string, method string, payload any, authToken string, headers map[string]string) (response *Response, err error) { + var requestBody []byte + if payload != nil { + requestBody, err = json.Marshal(payload) + if err != nil { + return nil, fmt.Errorf("%s: %w", errors.ErrMarshallingPayload, err) + } + } + + req, err := http.NewRequest(method, url, bytes.NewReader(requestBody)) + if err != nil { + return nil, fmt.Errorf("%s: %v", errors.ErrConstructingRequest, err) + } + + req.Header.Set("Authorization", "Bearer "+authToken) + for header, value := range headers { + req.Header.Set(header, value) + } + + apiRes, err := c.Do(req) + if err != nil { + return nil, fmt.Errorf("%s: %v", errors.ErrExecutingRequest, err) + } + defer apiRes.Body.Close() + + responseBody, err := io.ReadAll(apiRes.Body) + if err != nil { + return + } + + if apiRes.StatusCode >= http.StatusBadRequest { + var apiError Error + if err := json.Unmarshal(responseBody, &apiError); err != nil { + return nil, fmt.Errorf("status: %d, 
body: %s", apiRes.StatusCode, responseBody) + } + return nil, apiError + } + + return &Response{ + Response: apiRes, + Body: responseBody, + }, nil +} diff --git a/internal/api/cluster/cloud_provider.go b/internal/api/cluster/cloud_provider.go new file mode 100644 index 00000000..daadc12e --- /dev/null +++ b/internal/api/cluster/cloud_provider.go @@ -0,0 +1,25 @@ +package cluster + +// Defines values for CloudProviderType. +const ( + Aws CloudProviderType = "aws" + Azure CloudProviderType = "azure" + Gcp CloudProviderType = "gcp" +) + +// CloudProvider depicts where the cluster will be hosted. +// To learn more, see [Amazon Web Services](https://docs.couchbase.com/cloud/reference/aws.html). +type CloudProvider struct { + // Cidr block for Cloud Provider. + Cidr string `json:"cidr"` + + // Region is cloud provider region, e.g. 'us-west-2'. For information about supported regions, + // see [Amazon Web Services](https://docs.couchbase.com/cloud/reference/aws.html). + Region string `json:"region"` + + // Type is cloud provider type, either 'AWS', 'GCP', or 'Azure'. + Type CloudProviderType `json:"type"` +} + +// CloudProviderType is cloud provider type, either 'AWS', 'GCP', or 'Azure'. +type CloudProviderType string diff --git a/internal/api/cluster/cluster.go b/internal/api/cluster/cluster.go new file mode 100644 index 00000000..ff7d08af --- /dev/null +++ b/internal/api/cluster/cluster.go @@ -0,0 +1,86 @@ +package cluster + +import ( + "terraform-provider-capella/internal/api" + + "github.com/google/uuid" +) + +// Availability defines model for Availability. +type Availability struct { + // Type is availability zone type, either 'single' or 'multi'. + Type AvailabilityType `json:"type"` +} + +// AvailabilityType is availability zone type, either 'single' or 'multi'. +type AvailabilityType string + +// CreateClusterRequest defines model for CreateClusterRequest. 
+type CreateClusterRequest struct { + Availability Availability `json:"availability"` + + // CloudProvider is the cloud provider where the cluster will be hosted. + // To learn more, see [Amazon Web Services](https://docs.couchbase.com/cloud/reference/aws.html). + CloudProvider CloudProvider `json:"cloudProvider"` + CouchbaseServer *CouchbaseServer `json:"couchbaseServer,omitempty"` + + // Description depicts description of the cluster (up to 1024 characters). + Description *string `json:"description,omitempty"` + + // Name is the name of the cluster (up to 256 characters). + Name string `json:"name"` + + // ServiceGroups is the couchbase service groups to be run. At least one + // service group must contain the data service. + ServiceGroups []ServiceGroup `json:"serviceGroups"` + Support Support `json:"support"` +} + +// CreateClusterResponse defines model for CreateClusterResponse. +type CreateClusterResponse struct { + // Id The ID of the cluster created. + Id uuid.UUID `json:"id"` +} + +// GetClusterResponse defines model for GetClusterResponse. +type GetClusterResponse struct { + // AppServiceId is the ID of the linked app service. + AppServiceId *uuid.UUID `json:"appServiceId,omitempty"` + Audit api.CouchbaseAuditData `json:"audit"` + Availability Availability `json:"availability"` + + // CloudProvider is the cloud provider where the cluster will be hosted. To learn more, + // see [Amazon Web Services](https://docs.couchbase.com/cloud/reference/aws.html). + CloudProvider CloudProvider `json:"cloudProvider"` + CouchbaseServer CouchbaseServer `json:"couchbaseServer"` + CurrentState State `json:"currentState"` + + // Description depicts description of the cluster (up to 1024 characters). + Description string `json:"description"` + + // Id is the ID of the cluster created. + Id uuid.UUID `json:"id"` + + // Name Name of the cluster (up to 256 characters). 
+ Name string `json:"name"` + ServiceGroups []ServiceGroup `json:"serviceGroups"` + Support Support `json:"support"` + + Etag string +} + +// UpdateClusterRequest defines model for UpdateClusterRequest. +type UpdateClusterRequest struct { + // Description is the new cluster description (up to 1024 characters). + Description string `json:"description"` + + // Name is the new name of the cluster (up to 256 characters). + Name string `json:"name"` + ServiceGroups []ServiceGroup `json:"serviceGroups"` + Support Support `json:"support"` +} + +// GetClustersResponse defines the model for a GetClustersResponse. +type GetClustersResponse struct { + Data []GetClusterResponse `json:"data"` +} diff --git a/internal/api/cluster/node.go b/internal/api/cluster/node.go new file mode 100644 index 00000000..ddce77b3 --- /dev/null +++ b/internal/api/cluster/node.go @@ -0,0 +1,114 @@ +package cluster + +import ( + "encoding/json" +) + +// Node defines model for Node. +type Node struct { + // Compute Following are the supported compute combinations for CPU + // and RAM for different cloud providers. To learn more, + // see [Amazon Web Services](https://docs.couchbase.com/cloud/reference/aws.html). + Compute Compute `json:"compute"` + Disk json.RawMessage `json:"disk"` +} + +// Compute Following are the supported compute combinations for CPU +// and RAM for different cloud providers. To learn more, +// see [Amazon Web Services](https://docs.couchbase.com/cloud/reference/aws.html). +type Compute struct { + // Cpu depicts cpu units (cores). + Cpu int `json:"cpu"` + + // Ram depicts ram units (GB). + Ram int `json:"ram"` +} + +// DiskAWS defines model for DiskAWS. +type DiskAWS struct { + // Iops Please refer to documentation for supported IOPS. + Iops int `json:"iops"` + + // Storage depicts storage in GB. See documentation for supported storage. + Storage int `json:"storage"` + + // Type depicts type of disk. Please choose from the given list for + // AWS cloud provider. 
+ Type DiskAWSType `json:"type"` +} + +// DiskAWSType depicts type of disk. Please choose from the given list +// for AWS cloud provider. +type DiskAWSType string + +// DiskAzure defines model for DiskAzure. +type DiskAzure struct { + // Iops is required for ultra disk types. Please refer to documentation + // for supported IOPS. + Iops *int `json:"iops,omitempty"` + + // Storage depicts storage in GB. Required for ultra disk types. + // Please refer to documentation for supported storage. + Storage *int `json:"storage,omitempty"` + + // Type depicts type of disk. Please choose from the given list + // for Azure cloud provider. + Type DiskAzureType `json:"type"` +} + +// DiskAzureType depicts type of disk. Please choose from the given list for Azure cloud provider. +type DiskAzureType string + +// DiskGCP defines model for DiskGCP. +type DiskGCP struct { + // Storage is storage in GB. Please refer to documentation for supported storage. + Storage int `json:"storage"` + + // Type is type of disk. Please choose from the given list for GCP cloud provider. + Type DiskGCPType `json:"type"` +} + +// DiskGCPType is type of disk. Please choose from the given list for GCP cloud provider. 
+type DiskGCPType string + +// AsDiskAWS returns the disk data as a DiskAWS +func (n *Node) AsDiskAWS() (DiskAWS, error) { + var body DiskAWS + err := json.Unmarshal(n.Disk, &body) + return body, err +} + +// FromDiskAWS overwrites any disk data inside as the provided DiskAWS +func (n *Node) FromDiskAWS(v DiskAWS) error { + b, err := json.Marshal(v) + n.Disk = b + return err +} + +// AsDiskAzure returns the disk data as a DiskAzure +func (n *Node) AsDiskAzure() (DiskAzure, error) { + var body DiskAzure + err := json.Unmarshal(n.Disk, &body) + return body, err +} + +// FromDiskAzure overwrites any disk data as the provided DiskAzure +func (n *Node) FromDiskAzure(v DiskAzure) error { + b, err := json.Marshal(v) + n.Disk = b + return err +} + +// AsDiskGCP returns the disk data as a DiskGCP +func (n *Node) AsDiskGCP() (DiskGCP, error) { + var body DiskGCP + err := json.Unmarshal(n.Disk, &body) + return body, err +} + +// FromDiskGCP overwrites any disk data as the provided DiskGCP +func (n *Node) FromDiskGCP(v DiskGCP) error { + b, err := json.Marshal(v) + n.Disk = b + return err +} diff --git a/internal/api/cluster/server.go b/internal/api/cluster/server.go new file mode 100644 index 00000000..af6dc12c --- /dev/null +++ b/internal/api/cluster/server.go @@ -0,0 +1,57 @@ +package cluster + +// Service defines model for Service. +type Service string + +// ServiceGroup The set of nodes that share the same disk, +// number of nodes and services. +type ServiceGroup struct { + Node *Node `json:"node,omitempty"` + + // NumOfNodes is the number of nodes. The minimum number of + // nodes for the cluster can be 3 and maximum can be 27 nodes. + // Additional service groups can have 2 nodes minimum and 24 nodes maximum. + NumOfNodes *int `json:"numOfNodes,omitempty"` + + // Services is the couchbase service to run on the node. + Services *[]Service `json:"services,omitempty"` +} + +// CouchbaseServer defines model for CouchbaseServer. 
+type CouchbaseServer struct { + // Version is version of the Couchbase Server to be installed + // in the cluster. Refer to documentation + // [here](https://docs.couchbase.com/cloud/clusters/upgrade-database.html#server-version-maintenance-support) + // for list of supported versions. The latest Couchbase Server version + // will be deployed by default. + Version *string `json:"version,omitempty"` +} + +// Contains checks whether passed element presents in array or not +func Contains[T comparable](s []T, e T) bool { + for _, r := range s { + if r == e { + return true + } + } + return false +} + +// AreEqual returns true if the two arrays contain the same elements, without any extra values, False otherwise. +func AreEqual[T comparable](array1 []T, array2 []T) bool { + if len(array1) != len(array2) { + return false + } + set1 := make(map[T]bool) + for _, element := range array1 { + set1[element] = true + } + + for _, element := range array2 { + if !set1[element] { + return false + } + } + + return len(set1) == len(array1) +} diff --git a/internal/api/cluster/state.go b/internal/api/cluster/state.go new file mode 100644 index 00000000..fb5874de --- /dev/null +++ b/internal/api/cluster/state.go @@ -0,0 +1,47 @@ +package cluster + +// Defines values for State. 
+const ( + Degraded State = "degraded" + Deploying State = "deploying" + DeploymentFailed State = "deploymentFailed" + DestroyFailed State = "destroyFailed" + Destroying State = "destroying" + Draft State = "draft" + Healthy State = "healthy" + Offline State = "offline" + Peering State = "peering" + PeeringFailed State = "peeringFailed" + RebalanceFailed State = "rebalanceFailed" + Rebalancing State = "rebalancing" + ScaleFailed State = "scaleFailed" + Scaling State = "scaling" + TurnedOff State = "turnedOff" + TurningOff State = "turningOff" + TurningOffFailed State = "turningOffFailed" + TurningOn State = "turningOn" + TurningOnFailed State = "turningOnFailed" + UpgradeFailed State = "upgradeFailed" + Upgrading State = "upgrading" +) + +// State defines model for State. +type State string + +// IsFinalState checks whether cluster is successfully deployed/updated or not while creation/updation +//TODO: Degraded, draft, peeringFailed, turningOffFailed, and turningOnFailed are not known when it occurs and What happens if rebalancing fails? Will it retry?" + +func IsFinalState(state State) bool { + //"""Returns True if the state is critical, False otherwise.""" + finalStates := []State{ + Healthy, + Degraded, + DeploymentFailed, + DestroyFailed, + PeeringFailed, + RebalanceFailed, + ScaleFailed, + UpgradeFailed, + } + return Contains(finalStates, state) +} diff --git a/internal/api/cluster/support.go b/internal/api/cluster/support.go new file mode 100644 index 00000000..06d66ba7 --- /dev/null +++ b/internal/api/cluster/support.go @@ -0,0 +1,16 @@ +package cluster + +// Support defines model for Support. +type Support struct { + // Plan is plan type, either 'Basic', 'Developer Pro', or 'Enterprise'. + Plan SupportPlan `json:"plan"` + + // Timezone is the standard timezone for the cluster. Should be the TZ identifier. + Timezone SupportTimezone `json:"timezone"` +} + +// SupportPlan is plan type, either 'Basic', 'Developer Pro', or 'Enterprise'. 
+type SupportPlan string + +// SupportTimezone is the standard timezone for the cluster. Should be the TZ identifier. +type SupportTimezone string diff --git a/internal/api/database_credential.go b/internal/api/database_credential.go new file mode 100644 index 00000000..5ee5bcd2 --- /dev/null +++ b/internal/api/database_credential.go @@ -0,0 +1,71 @@ +package api + +import ( + "github.com/google/uuid" +) + +// Access defines the level of access that the database credential will have across buckets and scopes. +// This access is currently defined for all buckets and all scopes in the cluster. +// todo: Support for granular access per bucket and per scope will be added in AV-62864 +type Access struct { + Privileges []string `json:"privileges"` + // Resources is the level at which the above privileges are defined. + // Ex: Access of read/write privilege can be defined at the bucket level or scope level resource. + Resources *AccessibleResources `json:"resources,omitempty"` +} + +// CreateDatabaseCredentialRequest represents the schema for the POST Capella V4 API request that creates the database credential. +// Password is an optional field, if not passed, the password for the database credential is auto-generated. +type CreateDatabaseCredentialRequest struct { + Name string `json:"name"` + Password string `json:"password,omitempty"` + Access []Access `json:"access"` +} + +// AccessibleResources is the level at which the above privileges are defined. +// Ex: Access of read/write privilege can be defined at the bucket level or scope level resource. +type AccessibleResources struct { + // Buckets contains the details of all buckets with scope and collection level information to which the access applies. + Buckets []Bucket `json:"buckets"` +} + +// Bucket contains the details of a single bucket with scope and collection level information. +// Scopes can be a subset of all scopes inside the bucket, since this is defined only to govern the access. 
+type Bucket struct { + Name string `json:"name"` + // Scopes is the details of the scopes inside the bucket to which we want to apply access privileges. + Scopes []Scope `json:"scopes,omitempty"` +} + +// Scope is the details of a single scope inside the bucket, and it contains the collections details too. +// This collections can be a subset of all collections inside the scope, since this is defined only to govern the access. +type Scope struct { + Name string `json:"name"` + Collections []string `json:"collections,omitempty"` +} + +// CreateDatabaseCredentialResponse represents the schema for the POST Capella V4 API response that creates the database credential. +type CreateDatabaseCredentialResponse struct { + Id uuid.UUID `json:"id"` + Password string `json:"password"` +} + +// GetDatabaseCredentialResponse represents the schema for the GET Capella V4 API request that fetches the database credential details. +type GetDatabaseCredentialResponse struct { + // Audit contains all audit-related fields. + Audit CouchbaseAuditData `json:"audit"` + + // Id A GUID4 identifier of the project. + Id uuid.UUID `json:"id"` + + Name string `json:"name"` + + Access []Access `json:"access"` +} + +// PutDatabaseCredentialRequest represents the schema for the PUT Capella V4 API request that updates an existing database credential. +// Password is an optional field, if not passed, the existing password is not updated. 
+type PutDatabaseCredentialRequest struct { + Password string `json:"password,omitempty"` + Access []Access `json:"access"` +} diff --git a/internal/api/error.go b/internal/api/error.go new file mode 100644 index 00000000..e1d1c2de --- /dev/null +++ b/internal/api/error.go @@ -0,0 +1,25 @@ +package api + +import ( + "encoding/json" + "fmt" +) + +type Error struct { + Code int `json:"code"` + Hint string `json:"hint"` + HttpStatusCode int `json:"httpStatusCode"` + Message string `json:"message"` +} + +func (e Error) Error() string { + return fmt.Sprintf("%s", e.Message) +} + +func (e Error) CompleteError() string { + jsonData, err := json.Marshal(e) + if err != nil { + return e.Message + } + return string(jsonData) +} diff --git a/internal/api/organization/organization.go b/internal/api/organization/organization.go new file mode 100644 index 00000000..f76af1a6 --- /dev/null +++ b/internal/api/organization/organization.go @@ -0,0 +1,24 @@ +package organization + +import ( + "github.com/google/uuid" + "terraform-provider-capella/internal/api" +) + +// GetOrganizationResponse defines the model for GetOrganizationResponse. +type GetOrganizationResponse struct { + // Audit contains all audit-related fields. + Audit api.CouchbaseAuditData `json:"audit"` + + // Name represents the organization name. + Name string `json:"name"` + + // Description is a short description of the organization. + Description *string `json:"description"` + + // Preferences stores preferences for the tenant. + Preferences *Preferences `json:"preferences"` + + // ID is the ID of the Organization + Id uuid.UUID `json:"id"` +} diff --git a/internal/api/organization/preferences.go b/internal/api/organization/preferences.go new file mode 100644 index 00000000..26febc35 --- /dev/null +++ b/internal/api/organization/preferences.go @@ -0,0 +1,6 @@ +package organization + +type Preferences struct { + // SessionDuration: Maximum allowed time in seconds inside the tenant for a user. 
+ SessionDuration *int32 `json:"sessionDuration"` +} diff --git a/internal/api/project.go b/internal/api/project.go new file mode 100644 index 00000000..216219ff --- /dev/null +++ b/internal/api/project.go @@ -0,0 +1,51 @@ +package api + +import ( + "github.com/google/uuid" +) + +// CreateProjectRequest defines model for CreateProjectRequest. +type CreateProjectRequest struct { + // Description A short description about the project. + Description string `json:"description,omitempty"` + + // Name The name of the project. + Name string `json:"name"` +} + +// CreateProjectResponse defines model for CreateProjectResponse. +type CreateProjectResponse struct { + // Id The ID of the project created. + Id uuid.UUID `json:"id"` +} + +// GetProjectResponse defines model for GetProjectResponse. +type GetProjectResponse struct { + // Audit contains all audit-related fields. + Audit CouchbaseAuditData `json:"audit"` + + // Description The description of a particular project. + Description string `json:"description"` + + // Id A GUID4 identifier of the project. + Id uuid.UUID `json:"id"` + + // Name The name of the project. + Name string `json:"name"` + + Etag string +} + +// PutProjectRequest defines the model for a PutProjectRequest. +type PutProjectRequest struct { + // Description represents a short description of the project. + Description string `json:"description,omitempty"` + + // Name is the name of the project. + Name string `json:"name"` +} + +// GetProjectsResponse defines the model for a GetProjectsResponse. +type GetProjectsResponse struct { + Data []GetProjectResponse `json:"data"` +} diff --git a/internal/api/user.go b/internal/api/user.go new file mode 100644 index 00000000..574ac800 --- /dev/null +++ b/internal/api/user.go @@ -0,0 +1,87 @@ +package api + +import "github.com/google/uuid" + +// CreateUserRequest defines the model for CreateUserRequest +type CreateUserRequest struct { + // Name represents the name of the user. 
+ Name string `json:"name"` + + // Email represents the email of the user. + Email string `json:"email"` + + // OrganizationRoles is an array of strings representing the roles granted to the user. + OrganizationRoles []string `json:"organizationRoles"` + + // Resources is an array of objects representing the resources the user has access to. + Resources []Resource `json:"resources"` +} + +// CreateUserResponse defines the model for CreateUserResponse. +type CreateUserResponse struct { + // ID is the ID of the user + Id uuid.UUID `json:"id"` +} + +// Response defines the model for a resource. +type Resource struct { + // Id is a GUID4 identifier of the resource. + Id string `json:"id"` + + // Type is the type of the resource. + Type *string `json:"type"` + + // Roles is an array of strings representing a users project roles + Roles []string `json:"roles"` +} + +// GetUserResponse defines the model for GetUserResponse. +type GetUserResponse struct { + // ID is the ID of the user + Id uuid.UUID `json:"id"` + + // Name represents the name of the user. + Name *string `json:"name"` + + // Email represents the email of the user. + Email string `json:"email"` + + // Status depicts whether the user is verified or not + Status string `json:"status"` + + // Inactive depicts whether the user has accepted the invite for the organization. + Inactive bool `json:"inactive"` + + // OrganizationId is a GUID4 identifier of the tenant. + OrganizationId uuid.UUID `json:"organizationId"` + + // OrganizationRoles is an array of strings representing the roles granted to the user. + OrganizationRoles []string `json:"organizationRoles"` + + // LastLogin is the time(UTC) at which user last logged in. + LastLogin string `json:"lastLogin"` + + // Region is the region of the user. + Region string `json:"region"` + + // TimeZone is the time zone of the user. 
+ TimeZone string `json:"timeZone"` + + // EnableNotifications represents whether email alerts for databases in projects + // will be recieved. + EnableNotifications bool `json:"enableNotifications"` + + // ExpiresAt is the time at which user expires. + ExpiresAt string `json:"expiresAt"` + + // Resources is an array of objects representing the resources the user has access to. + Resources []Resource `json:"resources"` + + // Audit contains all audit-related fields. + Audit CouchbaseAuditData `json:"audit"` +} + +// GetUsersReponse defines the model for GetUsersResponse +type GetUsersResponse struct { + Data []GetUserResponse `json:"data"` +} diff --git a/internal/datasources/allowlist.go b/internal/datasources/allowlist.go new file mode 100644 index 00000000..82506bf1 --- /dev/null +++ b/internal/datasources/allowlist.go @@ -0,0 +1,207 @@ +package datasources + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "terraform-provider-capella/internal/api" + "terraform-provider-capella/internal/errors" + providerschema "terraform-provider-capella/internal/schema" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &AllowList{} + _ datasource.DataSourceWithConfigure = &AllowList{} +) + +// AllowList is the allow list data source implementation. +type AllowList struct { + *providerschema.Data +} + +// NewAllowList is a helper function to simplify the provider implementation. +func NewAllowList() datasource.DataSource { + return &AllowList{} +} + +// Metadata returns the allow list data source type name. 
+func (d *AllowList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_allowlist" +} + +// Schema defines the schema for the allowlist data source. +func (d *AllowList) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "organization_id": requiredStringAttribute, + "project_id": requiredStringAttribute, + "cluster_id": requiredStringAttribute, + "data": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": computedStringAttribute, + "organization_id": computedStringAttribute, + "project_id": computedStringAttribute, + "cluster_id": computedStringAttribute, + "cidr": computedStringAttribute, + "comment": computedStringAttribute, + "expires_at": computedStringAttribute, + "audit": computedAuditAttribute, + }, + }, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data of allowlists. +func (d *AllowList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var state providerschema.AllowLists + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Validate state is not empty + err := d.validate(state) + if err != nil { + resp.Diagnostics.AddError( + "Error Reading Capella AllowLists", + "Could not read allow lists in cluster "+state.ClusterId.String()+": "+err.Error(), + ) + return + } + + var ( + organizationId = state.OrganizationId.ValueString() + projectId = state.ProjectId.ValueString() + clusterId = state.ClusterId.ValueString() + ) + + // Make request to list allowlists + response, err := d.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters/%s/allowedcidrs", d.HostURL, organizationId, projectId, clusterId), + http.MethodGet, + nil, + d.Token, + nil, + ) + switch err := err.(type) { + case nil: + case api.Error: + if err.HttpStatusCode != http.StatusNotFound { + resp.Diagnostics.AddError( + "Error Reading Capella AllowLists", + "Could not read allow lists in cluster "+state.ClusterId.String()+": "+err.CompleteError(), + ) + return + } + tflog.Info(ctx, "resource doesn't exist in remote server removing resource from state file") + resp.State.RemoveResource(ctx) + return + default: + resp.Diagnostics.AddError( + "Error Reading AllowLists", + "Could not read allow lists in cluster "+state.ClusterId.String()+": "+err.Error(), + ) + return + } + + allowListsResponse := api.GetAllowListsResponse{} + err = json.Unmarshal(response.Body, &allowListsResponse) + if err != nil { + resp.Diagnostics.AddError( + "Error reading allowlist", + "Could not create allowlist, unexpected error: "+err.Error(), + ) + return + } + + state = d.mapResponseBody(allowListsResponse, &state) + if err != nil { + resp.Diagnostics.AddError( + "Error reading allowlist", + "Could not create allowlist, unexpected error: "+err.Error(), + ) + return + } + + // Set state + diags = resp.State.Set(ctx, &state) + + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + +} + +// Configure adds the provider configured client to the allowlist data source. +func (d *AllowList) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + data, ok := req.ProviderData.(*providerschema.Data) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *ProviderSourceData, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + d.Data = data +} + +// mapResponseBody is used to map the response body from a call to +// get allowlists to the allowlists schema that will be used by terraform. +func (d *AllowList) mapResponseBody( + allowListsResponse api.GetAllowListsResponse, + state *providerschema.AllowLists, +) providerschema.AllowLists { + for _, allowList := range allowListsResponse.Data { + allowListState := providerschema.OneAllowList{ + Id: types.StringValue(allowList.Id.String()), + OrganizationId: types.StringValue(state.OrganizationId.ValueString()), + ProjectId: types.StringValue(state.ProjectId.ValueString()), + ClusterId: types.StringValue(state.ClusterId.ValueString()), + Cidr: types.StringValue(allowList.Cidr), + Comment: types.StringValue(*allowList.Comment), + ExpiresAt: types.StringValue(*allowList.ExpiresAt), + Audit: providerschema.CouchbaseAuditData{ + CreatedAt: types.StringValue(allowList.Audit.CreatedAt.String()), + CreatedBy: types.StringValue(allowList.Audit.CreatedBy), + ModifiedAt: types.StringValue(allowList.Audit.ModifiedAt.String()), + ModifiedBy: types.StringValue(allowList.Audit.ModifiedBy), + Version: types.Int64Value(int64(allowList.Audit.Version)), + }, + } + state.Data = append(state.Data, allowListState) + } + return *state +} + +// validate is used to verify that all the fields in the datasource +// have been populated. 
+func (d *AllowList) validate(state providerschema.AllowLists) error { + if state.OrganizationId.IsNull() { + return errors.ErrOrganizationIdMissing + } + if state.ProjectId.IsNull() { + return errors.ErrProjectIdMissing + } + if state.ClusterId.IsNull() { + return errors.ErrClusterIdMissing + } + return nil +} diff --git a/internal/datasources/allowlist_test.go b/internal/datasources/allowlist_test.go new file mode 100644 index 00000000..4d27f388 --- /dev/null +++ b/internal/datasources/allowlist_test.go @@ -0,0 +1,184 @@ +package datasources + +import ( + "terraform-provider-capella/internal/api" + "terraform-provider-capella/internal/errors" + providerschema "terraform-provider-capella/internal/schema" + "testing" + + "time" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/stretchr/testify/assert" +) + +func Test_MapResponseBody(t *testing.T) { + var ( + organizationId = basetypes.NewStringValue(uuid.New().String()) + projectId = basetypes.NewStringValue(uuid.New().String()) + clusterId = basetypes.NewStringValue(uuid.New().String()) + + cidr = "0.0.0.0/10" + comment = "comment" + expiresAt = "2023-09-26T19:20:30+01:00" + id = uuid.New() + + createdAt = time.Now() + createdBy = "user" + modifiedAt = time.Now() + modifiedBy = "user" + version = 1 + + allowList = api.GetAllowListResponse{ + Audit: api.CouchbaseAuditData{ + CreatedAt: createdAt, + CreatedBy: createdBy, + ModifiedAt: modifiedAt, + ModifiedBy: modifiedBy, + Version: version, + }, + Cidr: cidr, + Comment: &comment, + ExpiresAt: &expiresAt, + Id: id, + } + + OneAllowList = providerschema.OneAllowList{ + Audit: providerschema.CouchbaseAuditData{ + CreatedAt: basetypes.NewStringValue(createdAt.String()), + CreatedBy: basetypes.NewStringValue(createdBy), + ModifiedAt: basetypes.NewStringValue(modifiedAt.String()), + ModifiedBy: basetypes.NewStringValue(modifiedBy), + Version: basetypes.NewInt64Value(int64(version)), + }, + OrganizationId: 
basetypes.NewStringValue(organizationId.ValueString()), + ProjectId: basetypes.NewStringValue(projectId.ValueString()), + ClusterId: basetypes.NewStringValue(clusterId.ValueString()), + Cidr: basetypes.NewStringValue(cidr), + Comment: basetypes.NewStringValue(comment), + ExpiresAt: basetypes.NewStringValue(expiresAt), + Id: basetypes.NewStringValue(id.String()), + } + ) + + type test struct { + desc string + response api.GetAllowListsResponse + expectedState providerschema.AllowLists + } + + tests := []test{ + { + desc: "[POSITIVE] - Fields successfully populated - one allow list in response", + response: api.GetAllowListsResponse{ + Data: []api.GetAllowListResponse{ + allowList, + }, + }, + expectedState: providerschema.AllowLists{ + OrganizationId: organizationId, + ProjectId: projectId, + ClusterId: clusterId, + Data: []providerschema.OneAllowList{OneAllowList}, + }, + }, + { + desc: "[POSITIVE] - Fields successfully populated - multiple allow lists in response", + response: api.GetAllowListsResponse{ + Data: []api.GetAllowListResponse{ + allowList, + allowList, + }, + }, + expectedState: providerschema.AllowLists{ + OrganizationId: organizationId, + ProjectId: projectId, + ClusterId: clusterId, + Data: []providerschema.OneAllowList{ + OneAllowList, + OneAllowList, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + a := &AllowList{} + + state := providerschema.AllowLists{ + OrganizationId: organizationId, + ProjectId: projectId, + ClusterId: clusterId, + } + state = a.mapResponseBody(test.response, &state) + + assert.Equal(t, test.expectedState, state) + }) + } +} + +func Test_Validate(t *testing.T) { + var ( + organizationId = basetypes.NewStringValue("organizationId") + projectId = basetypes.NewStringValue("projectId") + clusterId = basetypes.NewStringValue("clusterId") + ) + + type test struct { + desc string + state providerschema.AllowLists + expectedErr error + } + + tests := []test{ + { + desc: "[POSITIVE] - All 
fields populated", + state: providerschema.AllowLists{ + OrganizationId: organizationId, + ProjectId: projectId, + ClusterId: clusterId, + }, + }, + { + desc: "[NEGATIVE] - OrganizationId is missing", + state: providerschema.AllowLists{ + ProjectId: projectId, + ClusterId: clusterId, + }, + expectedErr: errors.ErrOrganizationIdMissing, + }, + { + desc: "[NEGATIVE] - ProjectId is missing", + state: providerschema.AllowLists{ + OrganizationId: organizationId, + ClusterId: clusterId, + }, + expectedErr: errors.ErrProjectIdMissing, + }, + { + desc: "[NEGATIVE] - ClusterId is missing", + state: providerschema.AllowLists{ + OrganizationId: organizationId, + ProjectId: projectId, + }, + expectedErr: errors.ErrClusterIdMissing, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + a := &AllowList{} + + err := a.validate(test.state) + + if test.expectedErr != nil { + assert.Equal(t, test.expectedErr, err) + return + } + + assert.NoError(t, err) + }) + } +} diff --git a/internal/datasources/apikeys.go b/internal/datasources/apikeys.go new file mode 100644 index 00000000..78143716 --- /dev/null +++ b/internal/datasources/apikeys.go @@ -0,0 +1,172 @@ +package datasources + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "terraform-provider-capella/internal/api" + providerschema "terraform-provider-capella/internal/schema" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &ApiKey{} + _ datasource.DataSourceWithConfigure = &ApiKey{} +) + +// ApiKey is the api key data source implementation. +type ApiKey struct { + *providerschema.Data +} + +// NewApiKey is a helper function to simplify the provider implementation. 
+func NewApiKey() datasource.DataSource { + return &ApiKey{} +} + +// Metadata returns the api key data source type name. +func (d *ApiKey) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_apikeys" +} + +// Schema defines the schema for the api key data source. +func (d *ApiKey) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "organization_id": requiredStringAttribute, + "data": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": computedStringAttribute, + "organization_id": computedStringAttribute, + "name": computedStringAttribute, + "description": computedStringAttribute, + "expiry": computedFloat64Attribute, + "allowed_cidrs": computedListAttribute, + "organization_roles": computedListAttribute, + "resources": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": computedStringAttribute, + "roles": computedListAttribute, + "type": computedStringAttribute, + }, + }, + }, + "audit": computedAuditAttribute, + }, + }, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data of api keys. +func (d *ApiKey) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var state providerschema.ApiKeys + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + organizationId, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error Reading Api Keys in Capella", + "Could not read Capella api keys in organization "+organizationId+": "+err.Error(), + ) + return + } + + response, err := d.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/apikeys", d.HostURL, organizationId), + http.MethodGet, + nil, + d.Token, + nil, + ) + switch err := err.(type) { + case nil: + case api.Error: + resp.Diagnostics.AddError( + "Error Reading Capella ApiKeys", + "Could not read api keys in organization "+organizationId+": "+err.CompleteError(), + ) + return + default: + resp.Diagnostics.AddError( + "Error Reading Capella ApiKeys", + "Could not read api keys in organization "+organizationId+": "+err.Error(), + ) + return + } + + apiKeyResp := api.GetApiKeysResponse{} + err = json.Unmarshal(response.Body, &apiKeyResp) + if err != nil { + resp.Diagnostics.AddError( + "Error listing ApiKeys", + "Could not list api keys, unexpected error: "+err.Error(), + ) + return + } + + // Map response body to model + for _, apiKey := range apiKeyResp.Data { + audit := providerschema.NewCouchbaseAuditData(apiKey.Audit) + + auditObj, diags := types.ObjectValueFrom(ctx, audit.AttributeTypes(), audit) + if diags.HasError() { + resp.Diagnostics.AddError( + "Error listing ApiKeys", + fmt.Sprintf("Could not list api keys, unexpected error: %s", fmt.Errorf("error while audit conversion")), + ) + return + } + newApiKeyData, err := providerschema.NewApiKeyData(&apiKey, organizationId, auditObj) + if err != nil { + resp.Diagnostics.AddError( + "Error listing ApiKeys", + fmt.Sprintf("Could not list api keys, unexpected error: %s", err.Error()), + ) + return + } + state.Data = append(state.Data, newApiKeyData) + } + + // Set state + diags = resp.State.Set(ctx, &state) + + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} + +// Configure adds the provider configured client to the api key data source. +func (d *ApiKey) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + data, ok := req.ProviderData.(*providerschema.Data) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *ProviderSourceData, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.Data = data +} diff --git a/internal/datasources/attributes.go b/internal/datasources/attributes.go new file mode 100644 index 00000000..89ddb021 --- /dev/null +++ b/internal/datasources/attributes.go @@ -0,0 +1,58 @@ +package datasources + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +var ( + // computedStringAttribute returns a Terraform schema attribute + // which is configured to be computed. + computedStringAttribute = schema.StringAttribute{ + Computed: true, + } + + // requiredStringAttribute returns a Terraform schema attribute + // which is configured to be required. + requiredStringAttribute = schema.StringAttribute{ + Required: true, + } + + // computedBoolAttribute returns a Terraform schema attribute + // which is configured to be computed. + computedBoolAttribute = schema.BoolAttribute{ + Computed: true, + } + + // computedBoolAttribute returns a Terraform schema attribute + // which is configured to be computed. + computedInt64Attribute = schema.Int64Attribute{ + Computed: true, + } + + // computedBoolAttribute returns a Terraform schema attribute + // which is configured to be computed. 
+ computedFloat64Attribute = schema.Float64Attribute{ + Computed: true, + } + + // computedListAttribute returns a Terraform list schema attribute + // which is configured to be computed and of type string. + computedListAttribute = schema.ListAttribute{ + ElementType: types.StringType, + Computed: true, + } + + // computedAuditAttribute retuns a SingleNestedAttribute to + // represent couchbase audit data using terraform schema types. + computedAuditAttribute = schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "created_at": computedStringAttribute, + "created_by": computedStringAttribute, + "modified_at": computedStringAttribute, + "modified_by": computedStringAttribute, + "version": computedInt64Attribute, + }, + } +) diff --git a/internal/datasources/certificate.go b/internal/datasources/certificate.go new file mode 100644 index 00000000..bfd79ca1 --- /dev/null +++ b/internal/datasources/certificate.go @@ -0,0 +1,151 @@ +package datasources + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "terraform-provider-capella/internal/api" + "terraform-provider-capella/internal/errors" + providerschema "terraform-provider-capella/internal/schema" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &Certificate{} + _ datasource.DataSourceWithConfigure = &Certificate{} +) + +// Certificate is the certificate data source implementation. +type Certificate struct { + *providerschema.Data +} + +// NewCertificate is a helper function to simplify the provider implementation. +func NewCertificate() datasource.DataSource { + return &Certificate{} +} + +// Metadata returns the certificates data source type name. 
+func (c *Certificate) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_certificates" +} + +// Schema defines the schema for the allowlist data source. +func (c *Certificate) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "organization_id": requiredStringAttribute, + "project_id": requiredStringAttribute, + "cluster_id": requiredStringAttribute, + "certificate": computedStringAttribute, + }, + } +} + +// Read refreshes the Terraform state with the latest data of projects. +func (c *Certificate) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var state providerschema.Certificate + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Validate state is not empty + err := c.validate(state) + if err != nil { + resp.Diagnostics.AddError( + "Error Reading Capella Certificate", + "Could not read certificate in cluster "+state.ClusterId.String()+": "+err.Error(), + ) + return + } + + var ( + organizationId = state.OrganizationId.ValueString() + projectId = state.ProjectId.ValueString() + clusterId = state.ClusterId.ValueString() + ) + + response, err := c.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters/%s/certificates", c.HostURL, organizationId, projectId, clusterId), + http.MethodGet, + nil, + c.Token, + nil, + ) + switch err := err.(type) { + case nil: + case api.Error: + resp.Diagnostics.AddError( + "Error Reading Capella Certificate", + "Could not read certificate in cluster "+state.ClusterId.String()+": "+err.CompleteError(), + ) + return + default: + resp.Diagnostics.AddError( + "Error Reading Capella Certificate", + "Could not read certificate in cluster "+state.ClusterId.String()+": "+err.Error(), + ) + return + } + + 
certResp := api.GetCertificateResponse{} + err = json.Unmarshal(response.Body, &certResp) + if err != nil { + resp.Diagnostics.AddError( + "Error reading certificate", + "Could not read certificate in cluster, unexpected error: "+err.Error(), + ) + return + } + + state.Certificate = types.StringValue(certResp.Certificate) + + // Set state + diags = resp.State.Set(ctx, &state) + + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Configure adds the provider configured client to the project data source. +func (c *Certificate) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + data, ok := req.ProviderData.(*providerschema.Data) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *ProviderSourceData, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + c.Data = data +} + +// validate is used to verify that all the fields in the datasource +// have been populated. 
+func (c *Certificate) validate(state providerschema.Certificate) error { + if state.OrganizationId.IsNull() { + return errors.ErrOrganizationIdMissing + } + if state.ProjectId.IsNull() { + return errors.ErrProjectIdMissing + } + if state.ClusterId.IsNull() { + return errors.ErrClusterIdMissing + } + return nil +} diff --git a/internal/datasources/cluster.go b/internal/datasources/cluster.go new file mode 100644 index 00000000..77aa3fb2 --- /dev/null +++ b/internal/datasources/cluster.go @@ -0,0 +1,155 @@ +package datasources + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "terraform-provider-capella/internal/api" + clusterapi "terraform-provider-capella/internal/api/cluster" + "terraform-provider-capella/internal/errors" + providerschema "terraform-provider-capella/internal/schema" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &Cluster{} + _ datasource.DataSourceWithConfigure = &Cluster{} +) + +// Cluster is the Cluster data source implementation. +type Cluster struct { + *providerschema.Data +} + +// NewCluster is a helper function to simplify the provider implementation. +func NewCluster() datasource.DataSource { + return &Cluster{} +} + +// Metadata returns the cluster data source type name. +func (d *Cluster) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_clusters" +} + +// Schema defines the schema for the Cluster data source. +func (d *Cluster) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = ClusterSchema() +} + +// Read refreshes the Terraform state with the latest data of clusters. 
+func (d *Cluster) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var state providerschema.Clusters + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + if state.OrganizationId.IsNull() { + resp.Diagnostics.AddError( + "Error creating cluster", + "Could not create cluster, unexpected error: organization ID cannot be empty.", + ) + return + } + + if state.ProjectId.IsNull() { + resp.Diagnostics.AddError( + "Error creating cluster", + "Could not create cluster, unexpected error: project ID cannot be empty.", + ) + return + } + + var ( + organizationId = state.OrganizationId.ValueString() + projectId = state.ProjectId.ValueString() + ) + + response, err := d.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters", d.HostURL, organizationId, projectId), + http.MethodGet, + nil, + d.Token, + nil, + ) + switch err := err.(type) { + case nil: + case api.Error: + resp.Diagnostics.AddError( + "Error Reading Capella Clusters", + fmt.Sprintf("Could not read clusters in organization %s and project %s, unexpected error: %s", organizationId, projectId, err.CompleteError()), + ) + return + default: + resp.Diagnostics.AddError( + "Error Reading Capella Clusters", + fmt.Sprintf("Could not read clusters in organization %s and project %s, unexpected error: %s", organizationId, projectId, err.Error()), + ) + return + } + + clusterResp := clusterapi.GetClustersResponse{} + err = json.Unmarshal(response.Body, &clusterResp) + if err != nil { + resp.Diagnostics.AddError( + "Error creating cluster", + "Could not create cluster, unexpected error: "+err.Error(), + ) + return + } + + // Map response body to model + for _, cluster := range clusterResp.Data { + audit := providerschema.NewCouchbaseAuditData(cluster.Audit) + + auditObj, diags := types.ObjectValueFrom(ctx, audit.AttributeTypes(), audit) + if diags.HasError() { + resp.Diagnostics.AddError( + "Error 
Reading Capella Clusters", + fmt.Sprintf("Could not read clusters in organization %s and project %s, unexpected error: %s", organizationId, projectId, errors.ErrUnableToConvertAuditData), + ) + } + + newClusterData, err := providerschema.NewClusterData(&cluster, organizationId, projectId, auditObj) + if err != nil { + resp.Diagnostics.AddError( + "Error Reading Capella Clusters", + fmt.Sprintf("Could not read clusters in organization %s and project %s, unexpected error: %s", organizationId, projectId, err.Error()), + ) + } + state.Data = append(state.Data, *newClusterData) + } + + // Set state + diags = resp.State.Set(ctx, &state) + + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Configure adds the provider configured client to the cluster data source. +func (d *Cluster) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + data, ok := req.ProviderData.(*providerschema.Data) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *ProviderSourceData, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.Data = data +} diff --git a/internal/datasources/cluster_schema.go b/internal/datasources/cluster_schema.go new file mode 100644 index 00000000..72c2efa4 --- /dev/null +++ b/internal/datasources/cluster_schema.go @@ -0,0 +1,85 @@ +package datasources + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func ClusterSchema() schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "organization_id": requiredStringAttribute, + "project_id": requiredStringAttribute, + "data": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": computedStringAttribute, + "organization_id": computedStringAttribute, + "project_id": computedStringAttribute, + "name": computedStringAttribute, + "description": computedStringAttribute, + "cloud_provider": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "type": computedStringAttribute, + "region": computedStringAttribute, + "cidr": computedStringAttribute, + }, + }, + "couchbase_server": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "version": computedStringAttribute, + }, + }, + "service_groups": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "node": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "compute": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": computedInt64Attribute, + "ram": computedInt64Attribute, + }, + }, + "disk": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "type": computedStringAttribute, + "storage": computedInt64Attribute, + "iops": 
computedInt64Attribute, + }, + }, + }, + }, + "num_of_nodes": computedInt64Attribute, + "services": computedListAttribute, + }, + }, + }, + "availability": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "type": computedStringAttribute, + }, + }, + "support": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "plan": computedStringAttribute, + "timezone": computedStringAttribute, + }, + }, + "current_state": computedStringAttribute, + "app_service_id": computedStringAttribute, + "audit": computedAuditAttribute, + }, + }, + }, + }, + } +} diff --git a/internal/datasources/organization.go b/internal/datasources/organization.go new file mode 100644 index 00000000..11597292 --- /dev/null +++ b/internal/datasources/organization.go @@ -0,0 +1,179 @@ +package datasources + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "terraform-provider-capella/internal/api" + "terraform-provider-capella/internal/api/organization" + "terraform-provider-capella/internal/errors" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + + providerschema "terraform-provider-capella/internal/schema" +) + +var ( + _ datasource.DataSource = &Organization{} + _ datasource.DataSourceWithConfigure = &Organization{} +) + +type Organization struct { + *providerschema.Data +} + +func NewOrganization() datasource.DataSource { + return &Organization{} +} + +func (o *Organization) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_organization" +} + +func (o *Organization) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: 
map[string]schema.Attribute{ + "organization_id": requiredStringAttribute, + "name": computedStringAttribute, + "description": computedStringAttribute, + "preferences": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "session_duration": computedInt64Attribute, + }, + }, + "audit": computedAuditAttribute, + }, + } +} + +func (o *Organization) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var state providerschema.Organization + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + err := o.validate(state) + if err != nil { + resp.Diagnostics.AddError( + "Error Reading Capella Organization", + "Could not read organization in cluster"+state.OrganizationId.String()+": "+err.Error()) + return + } + + var organizationId = state.OrganizationId.ValueString() + + // Make request to get organization + response, err := o.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s", o.HostURL, organizationId), + http.MethodGet, + nil, + o.Token, + nil, + ) + switch err := err.(type) { + case nil: + case api.Error: + if err.HttpStatusCode != http.StatusNotFound { + resp.Diagnostics.AddError( + "Error Reading Capella Organization", + "Could not read organization in cluster "+state.OrganizationId.String()+": "+err.CompleteError(), + ) + return + } + tflog.Info(ctx, "resource doesn't exist in remote server removing resource from state file") + resp.State.RemoveResource(ctx) + return + default: + resp.Diagnostics.AddError( + "Error Reading Organization", + "Could not read organization in cluster "+state.OrganizationId.String()+": "+err.Error(), + ) + return + } + + organizationsResponse := organization.GetOrganizationResponse{} + err = json.Unmarshal(response.Body, &organizationsResponse) + if err != nil { + resp.Diagnostics.AddError( + "Error reading organization", + "Could not create organization, unexpected error: "+err.Error(), 
+ ) + return + } + + audit := providerschema.NewCouchbaseAuditData(organizationsResponse.Audit) + + auditObj, diags := types.ObjectValueFrom(ctx, audit.AttributeTypes(), audit) + if diags.HasError() { + resp.Diagnostics.AddError( + "Error while audit conversion", + "Could not perform audit conversion", + ) + return + } + + var preferences providerschema.Preferences + if organizationsResponse.Preferences != nil { + preferences = providerschema.NewPreferences(*organizationsResponse.Preferences) + } + + preferencesObj, diags := types.ObjectValueFrom(ctx, preferences.AttributeTypes(), preferences) + if diags.HasError() { + resp.Diagnostics.AddError( + "Error while preferences conversion", + "Could not perform preferences conversion", + ) + return + } + + orgState := providerschema.Organization{ + OrganizationId: types.StringValue(organizationsResponse.Id.String()), + Name: types.StringValue(organizationsResponse.Name), + Description: types.StringValue(*organizationsResponse.Description), + Audit: auditObj, + Preferences: preferencesObj, + } + state = orgState + + diags = resp.State.Set(ctx, &state) + + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// validate is used to verify that all the fields in the datasource +// have been populated. +func (o *Organization) validate(state providerschema.Organization) error { + if state.OrganizationId.IsNull() { + return errors.ErrOrganizationIdMissing + } + return nil +} + +// Configure adds the provider configured client to the organization data source. +func (o *Organization) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + data, ok := req.ProviderData.(*providerschema.Data) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *ProviderSourceData, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + o.Data = data +} diff --git a/internal/datasources/project.go b/internal/datasources/project.go new file mode 100644 index 00000000..007574cd --- /dev/null +++ b/internal/datasources/project.go @@ -0,0 +1,162 @@ +package datasources + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "terraform-provider-capella/internal/api" + providerschema "terraform-provider-capella/internal/schema" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &Project{} + _ datasource.DataSourceWithConfigure = &Project{} +) + +// Project is the project data source implementation. +type Project struct { + *providerschema.Data +} + +// NewProject is a helper function to simplify the provider implementation. +func NewProject() datasource.DataSource { + return &Project{} +} + +// Metadata returns the project data source type name. +func (d *Project) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_projects" +} + +// Schema defines the schema for the project data source. 
+func (d *Project) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "organization_id": requiredStringAttribute, + "data": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": computedStringAttribute, + "organization_id": computedStringAttribute, + "name": computedStringAttribute, + "description": computedStringAttribute, + "audit": computedAuditAttribute, + "if_match": computedStringAttribute, + "etag": computedStringAttribute, + }, + }, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data of projects. +func (d *Project) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var state providerschema.Projects + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + if state.OrganizationId.IsNull() { + resp.Diagnostics.AddError( + "Error creating project", + "Could not create project, unexpected error: organization ID cannot be empty.", + ) + return + } + var organizationId = state.OrganizationId.ValueString() + + response, err := d.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/projects", d.HostURL, organizationId), + http.MethodGet, + nil, + d.Token, + nil, + ) + switch err := err.(type) { + case nil: + case api.Error: + if err.HttpStatusCode != 404 { + resp.Diagnostics.AddError( + "Error Reading Capella Projects", + "Could not read projects in organization "+state.OrganizationId.String()+": "+err.CompleteError(), + ) + return + } + tflog.Info(ctx, "resource doesn't exist in remote server removing resource from state file") + resp.State.RemoveResource(ctx) + return + default: + resp.Diagnostics.AddError( + "Error Reading Capella Projects", + "Could not read projects in organization "+state.OrganizationId.String()+": 
"+err.Error(), + ) + return + } + + projectResp := api.GetProjectsResponse{} + err = json.Unmarshal(response.Body, &projectResp) + if err != nil { + resp.Diagnostics.AddError( + "Error creating project", + "Could not create project, unexpected error: "+err.Error(), + ) + return + } + + // Map response body to model + for _, project := range projectResp.Data { + projectState := providerschema.OneProject{ + Id: types.StringValue(project.Id.String()), + OrganizationId: types.StringValue(state.OrganizationId.ValueString()), + Name: types.StringValue(project.Name), + Description: types.StringValue(project.Description), + Audit: providerschema.CouchbaseAuditData{ + CreatedAt: types.StringValue(project.Audit.CreatedAt.String()), + CreatedBy: types.StringValue(project.Audit.CreatedBy), + ModifiedAt: types.StringValue(project.Audit.ModifiedAt.String()), + ModifiedBy: types.StringValue(project.Audit.ModifiedBy), + Version: types.Int64Value(int64(project.Audit.Version)), + }, + } + state.Data = append(state.Data, projectState) + } + + // Set state + diags = resp.State.Set(ctx, &state) + + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Configure adds the provider configured client to the project data source. +func (d *Project) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + data, ok := req.ProviderData.(*providerschema.Data) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *ProviderSourceData, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.Data = data +} diff --git a/internal/datasources/users.go b/internal/datasources/users.go new file mode 100644 index 00000000..9812c54c --- /dev/null +++ b/internal/datasources/users.go @@ -0,0 +1,222 @@ +package datasources + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "terraform-provider-capella/internal/api" + "terraform-provider-capella/internal/errors" + providerschema "terraform-provider-capella/internal/schema" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &Users{} + _ datasource.DataSourceWithConfigure = &Users{} +) + +// Users is the user data source implementation. +type Users struct { + *providerschema.Data +} + +// NewUsers is a helper function to simplify the provider implementation. +func NewUsers() datasource.DataSource { + return &Users{} +} + +// Metadata returns the user data source type name. +func (d *Users) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_users" +} + +// Schema defines the schema for the User data source. 
+func (d *Users) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "organization_id": requiredStringAttribute, + "data": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": computedStringAttribute, + "name": computedStringAttribute, + "status": computedStringAttribute, + "inactive": computedBoolAttribute, + "email": computedStringAttribute, + "organization_id": computedStringAttribute, + "organization_roles": computedListAttribute, + "last_login": computedStringAttribute, + "region": computedStringAttribute, + "time_zone": computedStringAttribute, + "enable_notifications": computedBoolAttribute, + "expires_at": computedStringAttribute, + "resources": schema.ListNestedAttribute{ + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "type": computedStringAttribute, + "id": computedStringAttribute, + "roles": computedListAttribute, + }, + }, + }, + "audit": computedAuditAttribute, + }, + }, + }, + }, + } +} + +// Read refreshes the Terraform state with the latest data of Users. +func (d *Users) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var state providerschema.Users + diags := req.Config.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Validate state is not empty + err := d.validate(state) + if err != nil { + resp.Diagnostics.AddError( + "Error Reading Capella Users", + "Could not read users in organization "+state.OrganizationId.String()+": "+err.Error(), + ) + return + } + + organizationId := state.OrganizationId.ValueString() + + // Make request to list Users + response, err := d.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/users", d.HostURL, organizationId), + http.MethodGet, + nil, + d.Token, + nil, + ) + switch err := err.(type) { + case nil: + case api.Error: + if err.HttpStatusCode != http.StatusNotFound { + resp.Diagnostics.AddError( + "Error Reading Capella Users", + "Could not read users in organization "+state.OrganizationId.String()+": "+err.CompleteError(), + ) + return + } + return + default: + resp.Diagnostics.AddError( + "Error Reading Users", + "Could not read users in organization "+state.OrganizationId.String()+": "+err.Error(), + ) + return + } + + UsersResponse := api.GetUsersResponse{} + err = json.Unmarshal(response.Body, &UsersResponse) + if err != nil { + resp.Diagnostics.AddError( + "Error reading User", + "Could not create User, unexpected error: "+err.Error(), + ) + return + } + + state, err = d.mapResponseBody(ctx, UsersResponse, &state) + if err != nil { + resp.Diagnostics.AddError( + "Error reading User", + "Could not create User, unexpected error: "+err.Error(), + ) + return + } + + // Set state + diags = resp.State.Set(ctx, &state) + + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + +} + +// Configure adds the provider configured client to the User data source. 
+func (d *Users) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + data, ok := req.ProviderData.(*providerschema.Data) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *ProviderSourceData, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + d.Data = data +} + +// mapResponseBody is used to map the response body from a call to +// get Users to the Users schema that will be used by terraform. +func (d *Users) mapResponseBody( + ctx context.Context, + UsersResponse api.GetUsersResponse, + state *providerschema.Users, +) (providerschema.Users, error) { + for _, userResp := range UsersResponse.Data { + audit := providerschema.NewCouchbaseAuditData(userResp.Audit) + + auditObj, diags := types.ObjectValueFrom(ctx, audit.AttributeTypes(), audit) + if diags.HasError() { + return *state, fmt.Errorf("error occured while attempting to convert audit data") + } + + // Set Optional Values + var name string + if userResp.Name != nil { + name = *userResp.Name + } + + UserState := providerschema.NewUser( + types.StringValue(userResp.Id.String()), + types.StringValue(name), + types.StringValue(userResp.Email), + types.StringValue(userResp.Status), + types.BoolValue(userResp.Inactive), + types.StringValue(userResp.OrganizationId.String()), + providerschema.MorphOrganizationRoles(userResp.OrganizationRoles), + types.StringValue(userResp.LastLogin), + types.StringValue(userResp.Region), + types.StringValue(userResp.TimeZone), + types.BoolValue(userResp.EnableNotifications), + types.StringValue(userResp.ExpiresAt), + providerschema.MorphResources(userResp.Resources), + auditObj, + ) + + state.Data = append(state.Data, *UserState) + } + return *state, nil +} + +// validate is used to verify that all the fields in the datasource +// have been populated. 
+func (d *Users) validate(state providerschema.Users) error { + if state.OrganizationId.IsNull() { + return errors.ErrOrganizationIdMissing + } + return nil +} diff --git a/internal/errors/errors.go b/internal/errors/errors.go new file mode 100644 index 00000000..a701f765 --- /dev/null +++ b/internal/errors/errors.go @@ -0,0 +1,99 @@ +package errors + +import "errors" + +var ( + // ErrIdMissing is returned when an expected Id was not found after an import. + ErrIdMissing = errors.New("some ID is missing or was passed incorrectly, please check provider documentation for syntax") + + // ErrUserIdCannotBeEmpty is returned when a User Id was required for a request but was not included. + ErrUserIdCannotBeEmpty = errors.New("user ID cannot be empty, please contact Couchbase Capella Support") + + // ErrUserIdMissing is returned when an expected User Id was not found after an import. + ErrUserIdMissing = errors.New("user ID is missing or was passed incorrectly, please check provider documentation for syntax") + + // ErrAllowListIdCannotBeEmpty is returned when an AllowList Id was required for a request but was not included. + ErrAllowListIdCannotBeEmpty = errors.New("allowlist ID cannot be empty, please contact Couchbase Capella Support") + + // ErrAllowListIdMissing is returned when an expected AllowList Id was not found after an import. + ErrAllowListIdMissing = errors.New("allowList ID is missing or was passed incorrectly, please check provider documentation for syntax") + + // ErrClusterIdCannotBeEmpty is returned when a Cluster Id was required for a request but was not included. + ErrClusterIdCannotBeEmpty = errors.New("cluster ID cannot be empty, please contact Couchbase Capella Support") + + // ErrClusterIdMissing is returned when an expected Cluster Id was not found after an import. 
+	ErrClusterIdMissing = errors.New("cluster ID is missing or was passed incorrectly, please check provider documentation for syntax")
+
+	// ErrProjectIdCannotBeEmpty is returned when a Project Id was required for a request but was not included.
+	ErrProjectIdCannotBeEmpty = errors.New("project ID cannot be empty, please contact Couchbase Capella Support")
+
+	// ErrProjectIdMissing is returned when an expected Project Id was not found after an import.
+	ErrProjectIdMissing = errors.New("project ID is missing or was passed incorrectly, please check provider documentation for syntax")
+
+	// ErrUnableToUpdateProjectId is returned when an update to a projectId was unsuccessful.
+	ErrUnableToUpdateProjectId = errors.New("unable to update projectId")
+
+	// ErrOrganizationIdCannotBeEmpty is returned when an Organization Id was required for a request but was not included.
+	ErrOrganizationIdCannotBeEmpty = errors.New("organization ID cannot be empty, please contact Couchbase Capella Support")
+
+	// ErrOrganizationIdMissing is returned when an expected Organization Id was not found after an import.
+	ErrOrganizationIdMissing = errors.New("organization ID is missing or was passed incorrectly, please check provider documentation for syntax")
+
+	// ErrUnableToUpdateOrganizationId is returned when an update to an organizationId was unsuccessful.
+	ErrUnableToUpdateOrganizationId = errors.New("unable to update organizationId")
+
+	// ErrDatabaseCredentialIdCannotBeEmpty is returned when a Database Credential Id was required for a request but was not included.
+	ErrDatabaseCredentialIdCannotBeEmpty = errors.New("database credential ID cannot be empty, please contact Couchbase Capella Support")
+
+	// ErrDatabaseCredentialIdMissing is returned when an expected DatabaseCredential Id was not found after an import.
+	ErrDatabaseCredentialIdMissing = errors.New("database credential ID is missing or was passed incorrectly, please check provider documentation for syntax")
+
+	// ErrEmailCannotBeEmpty is returned when an email address was required for a request but was not included.
+	ErrEmailCannotBeEmpty = errors.New("email cannot be empty, please contact Couchbase Capella Support")
+
+	// ErrOrganizationRolesCannotBeEmpty is returned when organization roles were required for a request but were not included.
+	ErrOrganizationRolesCannotBeEmpty = errors.New("organization roles cannot be empty, please contact Couchbase Capella Support")
+
+	// ErrUnableToUpdateServerVersion is returned when it is not possible to update the couchbase server version.
+	ErrUnableToUpdateServerVersion = errors.New("unable to update couchbase server version")
+
+	// ErrUnableToUpdateAvailabilityType is returned when it is not possible to update the availability type.
+	ErrUnableToUpdateAvailabilityType = errors.New("unable to update availability type")
+
+	// ErrUnableToUpdateCloudProvider is returned when it is not possible to update the cloud provider.
+	ErrUnableToUpdateCloudProvider = errors.New("unable to update cloud provider")
+
+	// ErrMarshallingPayload is returned when a payload has failed to marshal into a request body.
+	ErrMarshallingPayload = errors.New("failed to marshal payload")
+
+	// ErrUnmarshallingResponse is returned when a HTTP response fails to unmarshal.
+	ErrUnmarshallingResponse = errors.New("failed to unmarshal response")
+
+	// ErrConstructingRequest is returned when a HTTP.NewRequest has failed.
+	ErrConstructingRequest = errors.New("failed to construct request")
+
+	// ErrExecutingRequest is returned when a HTTP request has failed to execute.
+	ErrExecutingRequest = errors.New("failed to execute request")
+
+	// ErrUnableToConvertAuditData is returned when an attempt to convert audit data from
+	// terraform types.String to types string has failed.
+	ErrUnableToConvertAuditData = errors.New("failed to convert audit data")
+
+	// ErrUnableToImportResource is returned when a resource failed to be imported.
+	ErrUnableToImportResource = errors.New("failed to import resource")
+
+	// ErrUnsupportedCloudProvider is returned when an invalid cloud provider was requested.
+	ErrUnsupportedCloudProvider = errors.New("cloud provider is not supported")
+
+	// ErrUnableToReadCapellaUser is returned when the provider failed to read a requested Capella user.
+	ErrUnableToReadCapellaUser = errors.New("could not read Capella user, please contact Couchbase Capella Support")
+
+	// ErrApiKeyIdCannotBeEmpty is returned when an ApiKey Id was required for a request but was not included.
+	ErrApiKeyIdCannotBeEmpty = errors.New("api key ID cannot be empty, please contact Couchbase Capella Support")
+
+	// ErrApiKeyIdMissing is returned when an expected ApiKey Id was not found after an import.
+	ErrApiKeyIdMissing = errors.New("api key ID is missing or was passed incorrectly, please check provider documentation for syntax")
+
+	// ErrBucketIdCannotBeEmpty is returned when a Bucket Id was required for a request but was not included.
+ ErrBucketIdCannotBeEmpty = errors.New("bucket ID cannot be empty, please contact Couchbase Capella Support") +) diff --git a/internal/provider/provider.go b/internal/provider/provider.go new file mode 100644 index 00000000..2ad337db --- /dev/null +++ b/internal/provider/provider.go @@ -0,0 +1,192 @@ +package provider + +import ( + "context" + "terraform-provider-capella/internal/datasources" + "time" + + "terraform-provider-capella/internal/api" + "terraform-provider-capella/internal/resources" + providerschema "terraform-provider-capella/internal/schema" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure the implementation satisfies the expected interfaces. +var _ provider.Provider = &capellaProvider{} + +const ( + capellaAuthenticationTokenField = "authentication_token" + capellaPublicAPIHostField = "host" + apiRequestTimeout = 10 * time.Second +) + +// capellaProvider is the provider implementation. +type capellaProvider struct { + name string + // version is set to the provider version on release, "dev" when the + // provider is built and ran locally, and "test" when running acceptance + // testing. + version string +} + +// New is a helper function to simplify provider server and testing implementation. +func New(version string) func() provider.Provider { + return func() provider.Provider { + return &capellaProvider{ + name: "capella", + version: version, + } + } +} + +// Metadata returns the provider type name. 
+func (p *capellaProvider) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) {
+	resp.TypeName = p.name
+	resp.Version = p.version
+}
+
+// Schema defines the provider-level schema for configuration data.
+func (p *capellaProvider) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Attributes: map[string]schema.Attribute{
+			capellaPublicAPIHostField: schema.StringAttribute{
+				Required:    true,
+				Description: "Capella Public API HTTPS Host URL",
+			},
+			capellaAuthenticationTokenField: schema.StringAttribute{
+				Required:    true,
+				Sensitive:   true,
+				Description: "Capella API Token that serves as an authentication mechanism.",
+			},
+		},
+	}
+}
+
+// Configure configures the Capella client.
+// It validates the host and authentication token supplied in the provider
+// configuration and, on success, shares a configured API client with every
+// DataSource and Resource via ProviderData.
+func (p *capellaProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) {
+	tflog.Info(ctx, "Configuring the Capella Client")
+
+	// Retrieve provider data from configuration
+	var config providerschema.Config
+	diags := req.Config.Get(ctx, &config)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// If practitioner provided a configuration value for any of the
+	// attributes, it must be a known value.
+
+	if config.Host.IsUnknown() {
+		resp.Diagnostics.AddAttributeError(
+			path.Root(capellaPublicAPIHostField),
+			"Unknown Capella API Host",
+			"The provider cannot create the capella API client as there is an unknown configuration value for the capella API host. "+
+				"Either target apply the source of the value first, set the value statically in the configuration, or use the CAPELLA_HOST environment variable.",
+		)
+	}
+
+	if config.AuthenticationToken.IsUnknown() {
+		resp.Diagnostics.AddAttributeError(
+			path.Root(capellaAuthenticationTokenField),
+			"Unknown Capella Authentication Token",
+			"The provider cannot create the Capella API client as there is an unknown configuration value for the capella authentication token. "+
+				"Either target apply the source of the value first, set the value statically in the configuration, or use the CAPELLA_AUTHENTICATION_TOKEN environment variable.",
+		)
+	}
+
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// Set the host and authentication token to be used
+
+	host := config.Host.ValueString()
+	authenticationToken := config.AuthenticationToken.ValueString()
+
+	// If any of the expected configurations are missing, return
+	// error with provider-specific guidance.
+	if host == "" {
+		resp.Diagnostics.AddAttributeError(
+			path.Root(capellaPublicAPIHostField),
+			"Missing Capella Public API Host",
+			"The provider cannot create the Capella API client as there is a missing or empty value for the Capella API host. "+
+				"Set the host value in the configuration or use the TF_VAR_host environment variable. "+
+				"If either is already set, ensure the value is not empty.",
+		)
+	}
+
+	if authenticationToken == "" {
+		resp.Diagnostics.AddAttributeError(
+			path.Root(capellaAuthenticationTokenField),
+			"Missing Capella Authentication Token",
+			"The provider cannot create the Capella API client as there is a missing or empty value for the capella authentication token. "+
+				"Set the auth_token value in the configuration or use the TF_VAR_auth_token environment variable. "+
+				"If either is already set, ensure the value is not empty.",
+		)
+	}
+
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	ctx = tflog.SetField(ctx, capellaPublicAPIHostField, host)
+	ctx = tflog.SetField(ctx, capellaAuthenticationTokenField, authenticationToken)
+	// Mask the token so it is never written to provider logs.
+	ctx = tflog.MaskFieldValuesWithFieldKeys(ctx, capellaAuthenticationTokenField)
+
+	tflog.Debug(ctx, "Creating Capella client")
+
+	// Create a new capella client using the configuration values
+	providerData := &providerschema.Data{
+		HostURL: host,
+		Token:   authenticationToken,
+		Client:  api.NewClient(apiRequestTimeout),
+	}
+
+	// Make the Capella client available during DataSource and Resource
+	// type Configure methods.
+	//
+	// DataSourceData is provider-defined data, clients, etc. that is passed
+	// to [datasource.ConfigureRequest.ProviderData] for each DataSource type
+	// that implements the Configure method.
+	resp.DataSourceData = providerData
+	// ResourceData is provider-defined data, clients, etc. that is passed
+	// to [resource.ConfigureRequest.ProviderData] for each Resource type
+	// that implements the Configure method.
+	resp.ResourceData = providerData
+
+	tflog.Info(ctx, "Configured Capella client", map[string]any{"success": true})
+
+}
+
+// DataSources defines the data sources implemented in the provider.
+func (p *capellaProvider) DataSources(_ context.Context) []func() datasource.DataSource {
+	return []func() datasource.DataSource{
+		datasources.NewProject,
+		datasources.NewAllowList,
+		datasources.NewCertificate,
+		datasources.NewOrganization,
+		datasources.NewCluster,
+		datasources.NewUsers,
+		datasources.NewApiKey,
+	}
+}
+
+// Resources defines the resources implemented in the provider.
+func (p *capellaProvider) Resources(_ context.Context) []func() resource.Resource { + return []func() resource.Resource{ + resources.NewProject, + resources.NewCluster, + resources.NewAllowList, + resources.NewDatabaseCredential, + resources.NewBucket, + resources.NewUser, + resources.NewApiKey, + } +} diff --git a/internal/resources/allowlist.go b/internal/resources/allowlist.go new file mode 100644 index 00000000..1df2deb4 --- /dev/null +++ b/internal/resources/allowlist.go @@ -0,0 +1,328 @@ +package resources + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "terraform-provider-capella/internal/api" + "terraform-provider-capella/internal/errors" + providerschema "terraform-provider-capella/internal/schema" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &AllowList{} + _ resource.ResourceWithConfigure = &AllowList{} + _ resource.ResourceWithImportState = &AllowList{} +) + +// AllowList is the AllowList resource implementation. +type AllowList struct { + *providerschema.Data +} + +func NewAllowList() resource.Resource { + return &AllowList{} +} + +// Metadata returns the allowlist resource type name. +func (r *AllowList) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_allowlist" +} + +// Schema defines the schema for the allowlist resource. +func (r *AllowList) Schema(ctx context.Context, rsc resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = AllowlistsSchema() +} + +// Configure set provider-defined data, clients, etc. that is passed to data sources or resources in the provider. 
+func (r *AllowList) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + data, ok := req.ProviderData.(*providerschema.Data) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *ProviderSourceData, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + r.Data = data +} + +// Create creates a new allowlist +func (r *AllowList) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan providerschema.AllowList + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + allowListRequest := api.CreateAllowListRequest{ + Cidr: plan.Cidr.ValueString(), + Comment: plan.Comment.ValueString(), + ExpiresAt: plan.ExpiresAt.ValueString(), + } + + response, err := r.Client.Execute( + fmt.Sprintf( + "%s/v4/organizations/%s/projects/%s/clusters/%s/allowedcidrs", + r.HostURL, + plan.OrganizationId.ValueString(), + plan.ProjectId.ValueString(), + plan.ClusterId.ValueString(), + ), + http.MethodPost, + allowListRequest, + r.Token, + nil, + ) + if err != nil { + resp.Diagnostics.AddError( + "Error executing request", + "Could not execute request, unexpected error: "+err.Error(), + ) + return + } + + allowListResponse := api.GetAllowListResponse{} + err = json.Unmarshal(response.Body, &allowListResponse) + if err != nil { + resp.Diagnostics.AddError( + "Error creating allow list", + "Could not create allow list, unexpected error: "+err.Error(), + ) + return + } + + refreshedState, err := r.refreshAllowList(ctx, plan.OrganizationId.ValueString(), plan.ProjectId.ValueString(), plan.ClusterId.ValueString(), allowListResponse.Id.String()) + switch err := err.(type) { + case nil: + case api.Error: + resp.Diagnostics.AddError( + "Error reading Capella AllowList", + "Could not read Capella 
AllowList "+allowListResponse.Id.String()+": "+err.CompleteError(), + ) + return + default: + resp.Diagnostics.AddError( + "Error reading Capella AllowList", + "Could not read Capella AllowList "+allowListResponse.Id.String()+": "+err.Error(), + ) + return + } + + // Set state to fully populated data + diags = resp.State.Set(ctx, refreshedState) + + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read reads project information. +func (r *AllowList) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var state providerschema.AllowList + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + // Validate parameters were successfully imported + resourceIDs, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error Reading Capella AllowList", + "Could not read Capella allow list: "+err.Error(), + ) + return + } + + var ( + organizationId = resourceIDs["organizationId"] + projectId = resourceIDs["projectId"] + clusterId = resourceIDs["clusterId"] + allowListId = resourceIDs["allowListId"] + ) + + // refresh the existing allow list + refreshedState, err := r.refreshAllowList(ctx, organizationId, projectId, clusterId, allowListId) + switch err := err.(type) { + case nil: + case api.Error: + if err.HttpStatusCode != http.StatusNotFound { + resp.Diagnostics.AddError( + "Error Reading Capella AllowList", + "Could not read Capella allowListID "+allowListId+": "+err.CompleteError(), + ) + return + } + tflog.Info(ctx, "resource doesn't exist in remote server removing resource from state file") + resp.State.RemoveResource(ctx) + return + default: + resp.Diagnostics.AddError( + "Error Reading Capella AllowList", + "Could not read Capella allowListID "+allowListId+": "+err.Error(), + ) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, &refreshedState) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} + +// Update updates the allowlist. +func (r *AllowList) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Couchbase Capella's v4 does not support a PUT endpoint for allowlists. + // Allowlists can only be created, read and deleted. + // http://cbc-cp-api.s3-website-us-east-1.amazonaws.com/#tag/allowedCIDRs(Cluster) + // + // Note: In this situation, terraform apply will default to deleting and executing a new create. + // The update implementation should simply be left empty. + // https://developer.hashicorp.com/terraform/plugin/framework/resources/update +} + +// Delete deletes the allow list. +func (r *AllowList) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Retrieve existing state + var state providerschema.AllowList + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + resourceIDs, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error Deleting Capella Allow List", + "Could not delete Capella allow list: "+err.Error(), + ) + return + } + // Execute request to delete existing allowlist + _, err = r.Client.Execute( + fmt.Sprintf( + "%s/v4/organizations/%s/projects/%s/clusters/%s/allowedcidrs/%s", + r.HostURL, + resourceIDs["organizationId"], + resourceIDs["projectId"], + resourceIDs["clusterId"], + resourceIDs["allowListId"], + ), + http.MethodDelete, + nil, + r.Token, + nil, + ) + switch err := err.(type) { + case nil: + case api.Error: + if err.HttpStatusCode != http.StatusNotFound { + resp.Diagnostics.AddError( + "Error Deleting Capella Allow List", + "Could not delete Capella allowListId "+resourceIDs["allowListId"]+": "+err.CompleteError(), + ) + tflog.Info(ctx, "resource doesn't exist in remote server") + return + } + default: + resp.Diagnostics.AddError( + "Error Deleting Capella Allow List", + "Could not delete 
Capella allowListId "+resourceIDs["allowListId"]+": "+err.Error(), + ) + return + } +} + +// ImportState imports a remote allowlist that is not created by Terraform. +// Since Capella APIs may require multiple IDs, such as organizationId, projectId, clusterId, +// this function passes the root attribute which is a comma separated string of multiple IDs. +// example: id=cluster123,project_id=proj123,organization_id=org123 +// Unfortunately the terraform import CLI doesn't allow us to pass multiple IDs at this point +// and hence this workaround has been applied. +func (r *AllowList) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// getAllowList is used to retrieve an existing allow list +func (r *AllowList) getAllowList(ctx context.Context, organizationId, projectId, clusterId, allowListId string) (*api.GetAllowListResponse, error) { + response, err := r.Client.Execute( + fmt.Sprintf( + "%s/v4/organizations/%s/projects/%s/clusters/%s/allowedcidrs/%s", + r.HostURL, + organizationId, + projectId, + clusterId, + allowListId, + ), + http.MethodGet, + nil, + r.Token, + nil, + ) + if err != nil { + return nil, fmt.Errorf("%s: %v", errors.ErrConstructingRequest, err) + } + + allowListResp := api.GetAllowListResponse{} + err = json.Unmarshal(response.Body, &allowListResp) + if err != nil { + return nil, fmt.Errorf("%s: %v", errors.ErrUnmarshallingResponse, err) + } + return &allowListResp, nil +} + +// refreshAllowList is used to pass an existing AllowList to the refreshed state +func (r *AllowList) refreshAllowList(ctx context.Context, organizationId, projectId, clusterId, allowListId string) (*providerschema.OneAllowList, error) { + allowListResp, err := r.getAllowList(ctx, organizationId, projectId, clusterId, allowListId) + if err != nil { + return nil, err + } + + refreshedState := 
providerschema.OneAllowList{ + Id: types.StringValue(allowListResp.Id.String()), + OrganizationId: types.StringValue(organizationId), + ProjectId: types.StringValue(projectId), + ClusterId: types.StringValue(clusterId), + Cidr: types.StringValue(allowListResp.Cidr), + Audit: providerschema.CouchbaseAuditData{ + CreatedAt: types.StringValue(allowListResp.Audit.CreatedAt.String()), + CreatedBy: types.StringValue(allowListResp.Audit.CreatedBy), + ModifiedAt: types.StringValue(allowListResp.Audit.ModifiedAt.String()), + ModifiedBy: types.StringValue(allowListResp.Audit.ModifiedBy), + Version: types.Int64Value(int64(allowListResp.Audit.Version)), + }, + } + + // Set optional fields + if allowListResp.Comment != nil { + refreshedState.Comment = types.StringValue(*allowListResp.Comment) + } + + if allowListResp.ExpiresAt != nil { + refreshedState.ExpiresAt = types.StringValue(*allowListResp.ExpiresAt) + } + + return &refreshedState, nil +} diff --git a/internal/resources/allowlist_schema.go b/internal/resources/allowlist_schema.go new file mode 100644 index 00000000..b1c029f3 --- /dev/null +++ b/internal/resources/allowlist_schema.go @@ -0,0 +1,28 @@ +package resources + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" +) + +func AllowlistsSchema() schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "organization_id": stringAttribute(required, requiresReplace), + "project_id": stringAttribute(required, requiresReplace), + "cluster_id": stringAttribute(required, requiresReplace), + "cidr": stringAttribute(required, requiresReplace), + "comment": stringAttribute(optional, requiresReplace), + "expires_at": 
stringAttribute(optional, requiresReplace), + "if_match": stringAttribute(optional), + "audit": computedAuditAttribute(), + }, + } +} diff --git a/internal/resources/apikey.go b/internal/resources/apikey.go new file mode 100644 index 00000000..7927ab47 --- /dev/null +++ b/internal/resources/apikey.go @@ -0,0 +1,563 @@ +package resources + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + + "github.com/hashicorp/terraform-plugin-framework/path" + + "terraform-provider-capella/internal/api" + providerschema "terraform-provider-capella/internal/schema" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &ApiKey{} + _ resource.ResourceWithConfigure = &ApiKey{} + _ resource.ResourceWithImportState = &ApiKey{} +) + +// ApiKey is the ApiKey resource implementation. +type ApiKey struct { + *providerschema.Data +} + +func NewApiKey() resource.Resource { + return &ApiKey{} +} + +// Metadata returns the apiKey resource type name. +func (r *ApiKey) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_apikey" +} + +// Schema defines the schema for the apiKey resource. +func (r *ApiKey) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = ApiKeySchema() +} + +// Configure adds the provider configured client to the apiKey resource. 
+func (r *ApiKey) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + data, ok := req.ProviderData.(*providerschema.Data) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *ProviderSourceData, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + r.Data = data +} + +// Create creates a new apiKey. +func (a *ApiKey) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan providerschema.ApiKey + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + err := a.validateCreateApiKeyRequest(plan) + if err != nil { + resp.Diagnostics.AddError( + "Error creating ApiKey", + "Could not create ApiKey, unexpected error:"+err.Error(), + ) + return + } + + var organizationId = plan.OrganizationId.ValueString() + + apiKeyRequest := api.CreateApiKeyRequest{ + Name: plan.Name.ValueString(), + OrganizationRoles: a.convertOrganizationRoles(plan.OrganizationRoles), + } + + if !plan.Description.IsNull() && !plan.Description.IsUnknown() { + apiKeyRequest.Description = plan.Description.ValueStringPointer() + } + + if !plan.Expiry.IsNull() && !plan.Expiry.IsUnknown() { + expiry := float32(plan.Expiry.ValueFloat64()) + apiKeyRequest.Expiry = &expiry + } + + convertedResources, err := a.convertResources(plan.Resources) + if err != nil { + resp.Diagnostics.AddError( + "Error creating ApiKey", + "Could not create ApiKey, unexpected error:"+err.Error(), + ) + return + } + apiKeyRequest.Resources = &convertedResources + + if !plan.AllowedCIDRs.IsNull() && !plan.AllowedCIDRs.IsUnknown() { + convertedAllowedCidr, err := a.convertAllowedCidrs(ctx, plan.AllowedCIDRs) + if err != nil { + resp.Diagnostics.AddError( + "Error creating ApiKey", + "Could not create ApiKey, unexpected 
error:"+err.Error(), + ) + return + } + apiKeyRequest.AllowedCIDRs = &convertedAllowedCidr + } + + response, err := a.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/apikeys", a.HostURL, organizationId), + http.MethodPost, + apiKeyRequest, + a.Token, + nil, + ) + _, err = handleApiKeyError(err) + if err != nil { + resp.Diagnostics.AddError( + "Error creating ApiKey", + "Could not create ApiKey, unexpected error: "+err.Error(), + ) + return + } + + apiKeyResponse := api.CreateApiKeyResponse{} + err = json.Unmarshal(response.Body, &apiKeyResponse) + if err != nil { + resp.Diagnostics.AddError( + "Error creating ApiKey", + "Could not create ApiKey, unexpected error: "+err.Error(), + ) + return + } + + refreshedState, err := a.retrieveApiKey(ctx, organizationId, apiKeyResponse.Id) + switch err := err.(type) { + case nil: + case api.Error: + resp.Diagnostics.AddError( + "Error Reading Capella ApiKeys", + "Could not read Capella ApiKey ID "+apiKeyResponse.Id+": "+err.CompleteError(), + ) + return + default: + resp.Diagnostics.AddError( + "Error Reading Capella ApiKeys", + "Could not read Capella ApiKey ID "+apiKeyResponse.Id+": "+err.Error(), + ) + return + } + + resources, err := providerschema.OrderList2(plan.Resources, refreshedState.Resources) + switch err { + case nil: + refreshedState.Resources = resources + default: + tflog.Error(ctx, err.Error()) + } + + for i, resource := range refreshedState.Resources { + if providerschema.AreEqual(resource.Roles, plan.Resources[i].Roles) { + refreshedState.Resources[i].Roles = plan.Resources[i].Roles + } + } + + if providerschema.AreEqual(refreshedState.OrganizationRoles, plan.OrganizationRoles) { + refreshedState.OrganizationRoles = plan.OrganizationRoles + } + + refreshedState.Token = types.StringValue(apiKeyResponse.Token) + refreshedState.Rotate = plan.Rotate + + // Set state to fully populated data + diags = resp.State.Set(ctx, refreshedState) + + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} + +// Read reads ApiKey information. +func (a *ApiKey) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var state providerschema.ApiKey + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + resourceIDs, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error reading api key", + "Could not read api key id "+state.Id.String()+" unexpected error: "+err.Error(), + ) + return + } + + var ( + organizationId = resourceIDs[providerschema.OrganizationId] + apiKeyId = resourceIDs[providerschema.ApiKeyId] + ) + + // Get refreshed api key value from Capella + refreshedState, err := a.retrieveApiKey(ctx, organizationId, apiKeyId) + resourceNotFound, err := handleApiKeyError(err) + if resourceNotFound { + tflog.Info(ctx, "resource doesn't exist in remote server removing resource from state file") + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + "Error reading api key", + "Could not read api key id "+state.Id.String()+": "+err.Error(), + ) + return + } + + resources, err := providerschema.OrderList2(state.Resources, refreshedState.Resources) + switch err { + case nil: + refreshedState.Resources = resources + default: + tflog.Warn(ctx, err.Error()) + } + + if len(state.Resources) == len(refreshedState.Resources) { + for i, resource := range refreshedState.Resources { + if providerschema.AreEqual(resource.Roles, state.Resources[i].Roles) { + refreshedState.Resources[i].Roles = state.Resources[i].Roles + } + } + } + + if providerschema.AreEqual(refreshedState.OrganizationRoles, state.OrganizationRoles) { + refreshedState.OrganizationRoles = state.OrganizationRoles + } + + refreshedState.Token = state.Token + refreshedState.Rotate = state.Rotate + refreshedState.Secret = state.Secret + + // Set refreshed state + diags = resp.State.Set(ctx, &refreshedState) + 
resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update rotates the ApiKey. +func (a *ApiKey) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan, state providerschema.ApiKey + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + + diags = req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + resourceIDs, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error rotate api key", + "Could not rotate api key id "+state.Id.String()+" unexpected error: "+err.Error(), + ) + return + } + + var ( + organizationId = resourceIDs[providerschema.OrganizationId] + apiKeyId = resourceIDs[providerschema.ApiKeyId] + ) + + if plan.Rotate.IsNull() || plan.Rotate.IsUnknown() { + resp.Diagnostics.AddError( + "Error rotating api key", + "Could not rotate api key id "+state.Id.String()+": rotate value is not set", + ) + return + } + + if !state.Rotate.IsNull() && !state.Rotate.IsUnknown() { + planRotate := *plan.Rotate.ValueBigFloat() + stateRotate := *state.Rotate.ValueBigFloat() + if planRotate.Cmp(&stateRotate) != 1 { + resp.Diagnostics.AddError( + "Error rotating api key", + "Could not rotate api key id "+state.Id.String()+": plan rotate value is not greater than state rotate value", + ) + return + } + } + + var rotateApiRequest api.RotateAPIKeyRequest + if !plan.Secret.IsNull() || !plan.Secret.IsUnknown() { + rotateApiRequest = api.RotateAPIKeyRequest{ + Secret: plan.Secret.ValueStringPointer(), + } + } + + response, err := a.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/apikeys/%s/rotate", a.HostURL, organizationId, apiKeyId), + http.MethodPost, + rotateApiRequest, + a.Token, + nil, + ) + _, err = handleApiKeyError(err) + if err != nil { + resp.Diagnostics.AddError( + "Error rotating api key", + "Could not rotate api key id "+state.Id.String()+": 
"+err.Error(), + ) + return + } + + rotateApiKeyResponse := api.RotateAPIKeyResponse{} + err = json.Unmarshal(response.Body, &rotateApiKeyResponse) + if err != nil { + resp.Diagnostics.AddError( + "Error rotating api key", + "Could not rotate api key id "+state.Id.String()+": "+err.Error(), + ) + return + } + + currentState, err := a.retrieveApiKey(ctx, organizationId, apiKeyId) + _, err = handleApiKeyError(err) + if err != nil { + resp.Diagnostics.AddError( + "Error rotating api key", + "Could not rotate api key id "+state.Id.String()+": "+err.Error(), + ) + return + } + + resources, err := providerschema.OrderList2(state.Resources, currentState.Resources) + switch err { + case nil: + currentState.Resources = resources + default: + tflog.Error(ctx, err.Error()) + } + + for i, resource := range currentState.Resources { + if providerschema.AreEqual(resource.Roles, state.Resources[i].Roles) { + currentState.Resources[i].Roles = state.Resources[i].Roles + } + } + + if providerschema.AreEqual(currentState.OrganizationRoles, state.OrganizationRoles) { + currentState.OrganizationRoles = state.OrganizationRoles + } + + currentState.Secret = types.StringValue(rotateApiKeyResponse.SecretKey) + if !state.Id.IsNull() && !state.Id.IsUnknown() && !state.Secret.IsNull() && !state.Secret.IsUnknown() { + currentState.Token = types.StringValue(base64.StdEncoding.EncodeToString([]byte(state.Id.ValueString() + ":" + state.Secret.ValueString()))) + } + currentState.Rotate = plan.Rotate + + // Set state to fully populated data + diags = resp.State.Set(ctx, currentState) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the ApiKey. +func (a *ApiKey) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Retrieve values from state + var state providerschema.ApiKey + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + resourceIDs, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error deleting api key", + "Could not delete api key id "+state.Id.String()+" unexpected error: "+err.Error(), + ) + return + } + + var ( + organizationId = resourceIDs[providerschema.OrganizationId] + apiKeyId = resourceIDs[providerschema.ApiKeyId] + ) + + // Delete existing api key + _, err = a.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/apikeys/%s", a.HostURL, organizationId, apiKeyId), + http.MethodDelete, + nil, + a.Token, + nil, + ) + resourceNotFound, err := handleApiKeyError(err) + if resourceNotFound { + tflog.Info(ctx, "resource doesn't exist in remote server removing resource from state file") + return + } + if err != nil { + resp.Diagnostics.AddError( + "Error deleting api key", + "Could not delete api key id "+state.Id.String()+" unexpected error: "+err.Error(), + ) + return + } +} + +func (a *ApiKey) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// retrieveApiKey retrieves apikey information for a specified organization and apiKeyId. 
+func (a *ApiKey) retrieveApiKey(ctx context.Context, organizationId, apiKeyId string) (*providerschema.ApiKey, error) { + response, err := a.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/apikeys/%s", a.HostURL, organizationId, apiKeyId), + http.MethodGet, + nil, + a.Token, + nil, + ) + if err != nil { + return nil, err + } + + apiKeyResp := api.GetApiKeyResponse{} + err = json.Unmarshal(response.Body, &apiKeyResp) + if err != nil { + return nil, err + } + + audit := providerschema.NewCouchbaseAuditData(apiKeyResp.Audit) + + auditObj, diags := types.ObjectValueFrom(ctx, audit.AttributeTypes(), audit) + if diags.HasError() { + return nil, fmt.Errorf("error while audit conversion") + } + + refreshedState, err := providerschema.NewApiKey(&apiKeyResp, organizationId, auditObj) + if err != nil { + return nil, err + } + return refreshedState, nil +} + +// this func extract error message if error is api.Error and also checks whether error is +// resource not found +func handleApiKeyError(err error) (bool, error) { + switch err := err.(type) { + case nil: + return false, nil + case api.Error: + if err.HttpStatusCode != http.StatusNotFound { + return false, fmt.Errorf(err.CompleteError()) + } + return true, fmt.Errorf(err.CompleteError()) + default: + return false, err + } +} + +// validateCreateApiKeyRequest validates the required fields in the create request. 
+func (a *ApiKey) validateCreateApiKeyRequest(plan providerschema.ApiKey) error { + if plan.OrganizationId.IsNull() { + return fmt.Errorf("organizationId cannot be empty") + } + if plan.Name.IsNull() { + return fmt.Errorf("name cannot be empty") + } + if plan.OrganizationRoles == nil { + return fmt.Errorf("organizationRoles cannot be empty") + } + if plan.Resources == nil { + return fmt.Errorf("resource cannot be nil") + } + if !plan.Rotate.IsNull() && !plan.Rotate.IsUnknown() { + return fmt.Errorf("rotate value should not be set") + } + if !plan.Secret.IsNull() && !plan.Secret.IsUnknown() { + return fmt.Errorf("secret should not be set while create operation") + } + return nil +} + +// convertOrganizationRoles is used to convert all roles +// in an array of basetypes.StringValue to strings. +func (a *ApiKey) convertOrganizationRoles(organizationRoles []basetypes.StringValue) []string { + var convertedRoles []string + for _, role := range organizationRoles { + convertedRoles = append(convertedRoles, role.ValueString()) + } + return convertedRoles +} + +// convertResource is used to convert a resource object containing nested fields +// of type basetypes.StringValue to a resource object containing nested fields of go defined type. 
+func (a *ApiKey) convertResources(resources []providerschema.ApiKeyResourcesItems) ([]api.ResourcesItems, error) { + var convertedResources []api.ResourcesItems + for _, resource := range resources { + id, err := uuid.Parse(resource.Id.ValueString()) + if err != nil { + return nil, fmt.Errorf("resource id is not valid uuid") + } + convertedResource := api.ResourcesItems{ + Id: id, + } + + var convertedRoles []string + for _, role := range resource.Roles { + convertedRoles = append(convertedRoles, role.ValueString()) + } + convertedResource.Roles = convertedRoles + + if !resource.Type.IsNull() && !resource.Type.IsUnknown() { + convertedResource.Type = resource.Type.ValueStringPointer() + } + convertedResources = append(convertedResources, convertedResource) + } + return convertedResources, nil +} + +// convertAllowedCidrs is used to convert allowed cidrs in types.List to array of string. +func (a *ApiKey) convertAllowedCidrs(ctx context.Context, allowedCidrs types.List) ([]string, error) { + elements := make([]types.String, 0, len(allowedCidrs.Elements())) + diags := allowedCidrs.ElementsAs(ctx, &elements, false) + if diags.HasError() { + return nil, fmt.Errorf("error while extracting allowedCidrs elements") + } + + var convertedAllowedCidrs []string + for _, allowedCidr := range elements { + convertedAllowedCidrs = append(convertedAllowedCidrs, allowedCidr.ValueString()) + } + return convertedAllowedCidrs, nil +} diff --git a/internal/resources/apikey_schema.go b/internal/resources/apikey_schema.go new file mode 100644 index 00000000..e8e5dcde --- /dev/null +++ b/internal/resources/apikey_schema.go @@ -0,0 +1,64 @@ +package resources + +import ( + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listdefault" + 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func ApiKeySchema() schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "organization_id": stringAttribute(required, requiresReplace), + "name": stringAttribute(required, requiresReplace), + "description": stringAttribute(optional, computed, requiresReplace, useStateForUnknown), + "expiry": float64Attribute(optional, computed, requiresReplace, useStateForUnknown), + "allowed_cidrs": schema.ListAttribute{ + Optional: true, + Computed: true, + ElementType: types.StringType, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + listplanmodifier.RequiresReplace(), + }, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + Default: listdefault.StaticValue(types.ListValueMust(types.StringType, []attr.Value{types.StringValue("0.0.0.0/0")})), + }, + "organization_roles": stringListAttribute(required, requiresReplace), + "resources": schema.ListNestedAttribute{ + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": stringAttribute(required), + "roles": stringListAttribute(required), + "type": stringAttribute(optional, computed), + }, + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + }, + "rotate": schema.NumberAttribute{ + Optional: true, + Computed: true, + }, + "secret": stringAttribute(optional, computed, sensitive), + "token": stringAttribute(computed, sensitive), + "audit": 
computedAuditAttribute(), + }, + } +} diff --git a/internal/resources/attributes.go b/internal/resources/attributes.go new file mode 100644 index 00000000..fac9a205 --- /dev/null +++ b/internal/resources/attributes.go @@ -0,0 +1,167 @@ +package resources + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/float64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +const ( + optional = "optional" + computed = "computed" + required = "required" + sensitive = "sensitive" + requiresReplace = "requiresReplace" + useStateForUnknown = "useStateForUnknown" +) + +// stringAttribute is a variadic function which sets the requested fields +// in a string attribute to true and then returns the string attribute. +func stringAttribute(fields ...string) *schema.StringAttribute { + attribute := schema.StringAttribute{} + + for _, field := range fields { + switch field { + case required: + attribute.Required = true + case optional: + attribute.Optional = true + case computed: + attribute.Computed = true + case sensitive: + attribute.Sensitive = true + case requiresReplace: + var planModifiers = []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + } + attribute.PlanModifiers = append(attribute.PlanModifiers, planModifiers...) + case useStateForUnknown: + var planModifiers = []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + } + attribute.PlanModifiers = append(attribute.PlanModifiers, planModifiers...) 
+ } + } + return &attribute +} + +// boolAttribute is a variadic function which sets the requested fields +// in a bool attribute to true and then returns the string attribute. +func boolAttribute(fields ...string) *schema.BoolAttribute { + attribute := schema.BoolAttribute{} + + for _, field := range fields { + switch field { + case required: + attribute.Required = true + case optional: + attribute.Optional = true + case computed: + attribute.Computed = true + case sensitive: + attribute.Sensitive = true + case requiresReplace: + var planModifiers = []planmodifier.Bool{ + boolplanmodifier.RequiresReplace(), + } + attribute.PlanModifiers = planModifiers + } + } + return &attribute +} + +// int64Attribute is a variadic function which sets the requested fields +// in an Int64 attribute to true and then returns the string attribute. +func int64Attribute(fields ...string) *schema.Int64Attribute { + attribute := schema.Int64Attribute{} + + for _, field := range fields { + switch field { + case required: + attribute.Required = true + case optional: + attribute.Optional = true + case computed: + attribute.Computed = true + case sensitive: + attribute.Sensitive = true + } + } + return &attribute +} + +// float64Attribute is a variadic function which sets the requested fields +// in a float64 attribute to true and then returns the string attribute. +func float64Attribute(fields ...string) *schema.Float64Attribute { + attribute := schema.Float64Attribute{} + + for _, field := range fields { + switch field { + case required: + attribute.Required = true + case optional: + attribute.Optional = true + case computed: + attribute.Computed = true + case sensitive: + attribute.Sensitive = true + case requiresReplace: + var planModifiers = []planmodifier.Float64{ + float64planmodifier.RequiresReplace(), + } + attribute.PlanModifiers = append(attribute.PlanModifiers, planModifiers...) 
+ case useStateForUnknown: + var planModifiers = []planmodifier.Float64{ + float64planmodifier.UseStateForUnknown(), + } + attribute.PlanModifiers = append(attribute.PlanModifiers, planModifiers...) + } + } + return &attribute +} + +// stringListAttribute returns a Terraform string list schema attribute +// which is configured to be of type string. +func stringListAttribute(fields ...string) *schema.ListAttribute { + attribute := schema.ListAttribute{ + ElementType: types.StringType, + } + + for _, field := range fields { + switch field { + case required: + attribute.Required = true + case optional: + attribute.Optional = true + case computed: + attribute.Computed = true + case sensitive: + attribute.Sensitive = true + case requiresReplace: + var planModifiers = []planmodifier.List{ + listplanmodifier.RequiresReplace(), + } + attribute.PlanModifiers = planModifiers + } + } + return &attribute +} + +// computedAuditAttribute retuns a SingleNestedAttribute to +// represent couchbase audit data using terraform schema types. 
+func computedAuditAttribute() *schema.SingleNestedAttribute { + return &schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "created_at": stringAttribute(computed), + "created_by": stringAttribute(computed), + "modified_at": stringAttribute(computed), + "modified_by": stringAttribute(computed), + "version": int64Attribute(computed), + }, + } +} diff --git a/internal/resources/bucket.go b/internal/resources/bucket.go new file mode 100644 index 00000000..3aa35e06 --- /dev/null +++ b/internal/resources/bucket.go @@ -0,0 +1,410 @@ +package resources + +import ( + "context" + "encoding/json" + "fmt" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "net/http" + "terraform-provider-capella/internal/api" + bucketapi "terraform-provider-capella/internal/api/bucket" + "terraform-provider-capella/internal/errors" + providerschema "terraform-provider-capella/internal/schema" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &Bucket{} + _ resource.ResourceWithConfigure = &Bucket{} + _ resource.ResourceWithImportState = &Bucket{} +) + +// Bucket is the bucket resource implementation. +type Bucket struct { + *providerschema.Data +} + +// NewBucket is a helper function to simplify the provider implementation. +func NewBucket() resource.Resource { + return &Bucket{} +} + +// Metadata returns the Cluster resource type name. +func (c *Bucket) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_bucket" +} + +// Schema defines the schema for the Cluster resource. +func (c *Bucket) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = BucketSchema() +} + +// Create creates a new Bucket. 
+func (c *Bucket) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan providerschema.Bucket + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + BucketRequest := bucketapi.CreateBucketRequest{ + Name: plan.Name.ValueString(), + Type: plan.Type.ValueString(), + StorageBackend: plan.StorageBackend.ValueString(), + MemoryAllocationInMb: plan.MemoryAllocationInMb, + BucketConflictResolution: plan.BucketConflictResolution.ValueString(), + DurabilityLevel: plan.DurabilityLevel.ValueString(), + Replicas: plan.Replicas, + Flush: plan.Flush, + TimeToLiveInSeconds: plan.TimeToLiveInSeconds, + EvictionPolicy: plan.EvictionPolicy.ValueString(), + } + + if plan.OrganizationId.IsNull() { + resp.Diagnostics.AddError( + "Error creating bucket", + "Could not create bucket, unexpected error: "+errors.ErrOrganizationIdCannotBeEmpty.Error(), + ) + return + } + var organizationId = plan.OrganizationId.ValueString() + + if plan.ProjectId.IsNull() { + resp.Diagnostics.AddError( + "Error creating database credential", + "Could not create database credential, unexpected error: "+errors.ErrProjectIdCannotBeEmpty.Error(), + ) + return + } + var projectId = plan.ProjectId.ValueString() + + if plan.ClusterId.IsNull() { + resp.Diagnostics.AddError( + "Error creating database credential", + "Could not create database credential, unexpected error: "+errors.ErrClusterIdCannotBeEmpty.Error(), + ) + return + } + var clusterId = plan.ClusterId.ValueString() + + response, err := c.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters/%s/buckets", c.HostURL, organizationId, projectId, clusterId), + http.MethodPost, + BucketRequest, + c.Token, + nil, + ) + _, err = handleClusterError(err) + if err != nil { + resp.Diagnostics.AddError( + "Error creating bucket", + "Could not create bucket, unexpected error: "+string(response.Body), + ) + return + } + + 
BucketResponse := bucketapi.GetBucketResponse{} + err = json.Unmarshal(response.Body, &BucketResponse) + if err != nil { + resp.Diagnostics.AddError( + "Error creating bucket", + "Could not create bucket, error during unmarshalling:"+err.Error(), + ) + return + } + + refreshedState, err := c.retrieveBucket(ctx, organizationId, projectId, clusterId, BucketResponse.Id) + switch err := err.(type) { + case nil: + case api.Error: + resp.Diagnostics.AddError( + "Error Reading Capella Bucket", + "Could not read Capella bucket with ID "+BucketResponse.Id+": "+err.CompleteError(), + ) + return + default: + resp.Diagnostics.AddError( + "Error Reading Capella Bucket", + "Could not read Capella bucket with ID "+BucketResponse.Id+": "+err.Error(), + ) + return + } + + // Set state to fully populated data + diags = resp.State.Set(ctx, refreshedState) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Configure It adds the provider configured api to the project resource. +func (c *Bucket) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + data, ok := req.ProviderData.(*providerschema.Data) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *ProviderSourceData, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + c.Data = data +} + +// Read reads the bucket information. +func (c *Bucket) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var state providerschema.Bucket + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + bucketId, clusterId, projectId, organizationId, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error Reading Bucket in Capella", + "Could not read Capella Bucket with ID "+state.Id.String()+": "+err.Error(), + ) + return + } + + refreshedState, err := c.retrieveBucket(ctx, organizationId, projectId, clusterId, bucketId) + resourceNotFound, err := handleBucketError(err) + if resourceNotFound { + tflog.Info(ctx, "resource doesn't exist in remote server removing resource from state file") + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + "Error reading bucket", + "Could not read bucket with id "+state.Id.String()+": "+err.Error(), + ) + return + } + + diags = resp.State.Set(ctx, &refreshedState) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the bucket. +func (r *Bucket) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var state providerschema.Bucket + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + if state.OrganizationId.IsNull() { + resp.Diagnostics.AddError( + "Error creating bucket", + "Could not create bucket, unexpected error: "+errors.ErrOrganizationIdCannotBeEmpty.Error(), + ) + return + } + var organizationId = state.OrganizationId.ValueString() + + if state.ProjectId.IsNull() { + resp.Diagnostics.AddError( + "Error creating database credential", + "Could not create database credential, unexpected error: "+errors.ErrProjectIdCannotBeEmpty.Error(), + ) + return + } + var projectId = state.ProjectId.ValueString() + + if state.ClusterId.IsNull() { + resp.Diagnostics.AddError( + "Error creating database credential", + "Could not create database credential, unexpected error: "+errors.ErrClusterIdCannotBeEmpty.Error(), + ) + return + } + var clusterId = state.ClusterId.ValueString() + + if state.Id.IsNull() { + resp.Diagnostics.AddError( + "Error creating database credential", + "Could not create database credential, unexpected error: "+errors.ErrClusterIdCannotBeEmpty.Error(), + ) + return + } + var bucketId = state.Id.ValueString() + + _, err := r.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters/%s/buckets/%s", r.HostURL, organizationId, projectId, clusterId, bucketId), + http.MethodDelete, + nil, + r.Token, + nil, + ) + switch err := err.(type) { + case nil: + case api.Error: + if err.HttpStatusCode != 404 { + resp.Diagnostics.AddError( + "Error Deleting the Bucket", + "Could not delete Bucket associated with cluster "+clusterId+": "+err.CompleteError(), + ) + return + } + default: + resp.Diagnostics.AddError( + "Error Deleting Bucket", + "Could not delete Bucket associated with cluster "+clusterId+": "+err.Error(), + ) + return + } +} + +// ImportState imports a remote cluster that is not created by Terraform. 
+func (c *Bucket) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// retrieveBucket retrieves bucket information for a specified organization, project, cluster and bucket ID. +func (c *Bucket) retrieveBucket(ctx context.Context, organizationId, projectId, clusterId, bucketId string) (*providerschema.OneBucket, error) { + response, err := c.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters/%s/buckets/%s", c.HostURL, organizationId, projectId, clusterId, bucketId), + http.MethodGet, + nil, + c.Token, + nil, + ) + if err != nil { + return nil, err + } + + bucketResp := bucketapi.GetBucketResponse{} + err = json.Unmarshal(response.Body, &bucketResp) + if err != nil { + return nil, err + } + + refreshedState := providerschema.OneBucket{ + Id: types.StringValue(bucketResp.Id), + Name: types.StringValue(bucketResp.Name), + OrganizationId: types.StringValue(organizationId), + ProjectId: types.StringValue(projectId), + ClusterId: types.StringValue(clusterId), + Type: types.StringValue(bucketResp.Type), + StorageBackend: types.StringValue(bucketResp.StorageBackend), + MemoryAllocationInMb: bucketResp.MemoryAllocationInMb, + BucketConflictResolution: types.StringValue(bucketResp.BucketConflictResolution), + DurabilityLevel: types.StringValue(bucketResp.DurabilityLevel), + Replicas: bucketResp.Replicas, + Flush: bucketResp.Flush, + TimeToLiveInSeconds: bucketResp.TimeToLiveInSeconds, + EvictionPolicy: types.StringValue(bucketResp.EvictionPolicy), + Stats: &providerschema.Stats{ + ItemCount: types.Int64Value(int64(bucketResp.Stats.ItemCount)), + OpsPerSecond: types.Int64Value(int64(bucketResp.Stats.OpsPerSecond)), + DiskUsedInMib: types.Int64Value(int64(bucketResp.Stats.DiskUsedInMib)), + MemoryUsedInMib: types.Int64Value(int64(bucketResp.Stats.MemoryUsedInMib)), + }, + } + + 
return &refreshedState, nil +} + +// Update updates the bucket. +func (c *Bucket) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var state providerschema.Bucket + diags := req.Plan.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + bucketId, clusterId, projectId, organizationId, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error Reading Bucket in Capella", + "Could not read Capella Bucket with ID "+state.Id.String()+": "+err.Error(), + ) + return + } + + bucketUpdateRequest := bucketapi.PutBucketRequest{ + MemoryAllocationInMb: state.MemoryAllocationInMb, + DurabilityLevel: state.DurabilityLevel.ValueString(), + Replicas: state.Replicas, + Flush: state.Flush, + TimeToLiveInSeconds: state.TimeToLiveInSeconds, + } + + response, err := c.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters/%s/buckets/%s", c.HostURL, organizationId, projectId, clusterId, bucketId), + http.MethodPut, + bucketUpdateRequest, + c.Token, + nil, + ) + + _, err = handleBucketError(err) + if err != nil { + resp.Diagnostics.AddError( + "Error updating bucket", + "Could not update bucket, unexpected error: "+string(response.Body), + ) + return + } + + currentState, err := c.retrieveBucket(ctx, organizationId, projectId, clusterId, bucketId) + switch err := err.(type) { + case nil: + case api.Error: + resp.Diagnostics.AddError( + "Error updating bucket", + "Could not update Capella bucket with ID "+bucketId+": "+err.CompleteError(), + ) + return + default: + resp.Diagnostics.AddError( + "Error updating bucket", + "Could not update Capella bucket with ID "+bucketId+": "+err.Error(), + ) + return + } + + // Set state to fully populated data + diags = resp.State.Set(ctx, currentState) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} + +// handleBucketError extracts error message if error is api.Error and also checks whether error is +// resource not found +func handleBucketError(err error) (bool, error) { + switch err := err.(type) { + case nil: + return false, nil + case api.Error: + if err.HttpStatusCode != http.StatusNotFound { + return false, fmt.Errorf(err.CompleteError()) + } + return true, fmt.Errorf(err.CompleteError()) + default: + return false, err + } +} diff --git a/internal/resources/bucket_schema.go b/internal/resources/bucket_schema.go new file mode 100644 index 00000000..2c6e5f5f --- /dev/null +++ b/internal/resources/bucket_schema.go @@ -0,0 +1,42 @@ +package resources + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" +) + +func BucketSchema() schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "organization_id": stringAttribute(required), + "project_id": stringAttribute(required), + "cluster_id": stringAttribute(required), + "name": stringAttribute(required), + "type": stringAttribute(optional), + "storage_backend": stringAttribute(optional), + "memory_allocationinmb": int64Attribute(optional), + "conflict_resolution": stringAttribute(optional), + "durability_level": stringAttribute(optional), + "replicas": int64Attribute(optional), + "flush": boolAttribute(optional), + "ttl": int64Attribute(optional), + "eviction_policy": stringAttribute(computed, optional), + "stats": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "item_count": int64Attribute(computed), + "ops_per_second": int64Attribute(computed), + 
"disk_used_in_mib": int64Attribute(computed), + "memory_used_in_mib": int64Attribute(computed), + }, + }, + }, + } +} diff --git a/internal/resources/cluster.go b/internal/resources/cluster.go new file mode 100644 index 00000000..70fed0e1 --- /dev/null +++ b/internal/resources/cluster.go @@ -0,0 +1,718 @@ +package resources + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "time" + + "terraform-provider-capella/internal/api" + clusterapi "terraform-provider-capella/internal/api/cluster" + providerschema "terraform-provider-capella/internal/schema" + + "terraform-provider-capella/internal/errors" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &Cluster{} + _ resource.ResourceWithConfigure = &Cluster{} + _ resource.ResourceWithImportState = &Cluster{} +) + +// Cluster is the project resource implementation. +type Cluster struct { + *providerschema.Data +} + +// NewCluster is a helper function to simplify the provider implementation. +func NewCluster() resource.Resource { + return &Cluster{} +} + +// Metadata returns the Cluster resource type name. +func (c *Cluster) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cluster" +} + +// Schema defines the schema for the Cluster resource. +func (c *Cluster) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = ClusterSchema() +} + +// Create creates a new Cluster. +func (c *Cluster) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan providerschema.Cluster + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) 
+
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	ClusterRequest := clusterapi.CreateClusterRequest{
+		Name: plan.Name.ValueString(),
+		Availability: clusterapi.Availability{
+			Type: clusterapi.AvailabilityType(plan.Availability.Type.ValueString()),
+		},
+		CloudProvider: clusterapi.CloudProvider{
+			Cidr:   plan.CloudProvider.Cidr.ValueString(),
+			Region: plan.CloudProvider.Region.ValueString(),
+			Type:   clusterapi.CloudProviderType(plan.CloudProvider.Type.ValueString()),
+		},
+		Support: clusterapi.Support{
+			Plan:     clusterapi.SupportPlan(plan.Support.Plan.ValueString()),
+			Timezone: clusterapi.SupportTimezone(plan.Support.Timezone.ValueString()),
+		},
+	}
+
+	if !plan.Description.IsNull() && !plan.Description.IsUnknown() {
+		ClusterRequest.Description = plan.Description.ValueStringPointer()
+	}
+
+	if !plan.CouchbaseServer.Version.IsNull() && !plan.CouchbaseServer.Version.IsUnknown() {
+		version := plan.CouchbaseServer.Version.ValueString()
+		ClusterRequest.CouchbaseServer = &clusterapi.CouchbaseServer{
+			Version: &version,
+		}
+	}
+
+	serviceGroups, err := c.morphToApiServiceGroups(plan)
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Error creating cluster",
+			"Could not create cluster : unexpected error "+err.Error(),
+		)
+		return
+	}
+
+	ClusterRequest.ServiceGroups = serviceGroups
+
+	if plan.OrganizationId.IsNull() {
+		resp.Diagnostics.AddError(
+			"Error creating Cluster",
+			"Could not create Cluster, unexpected error: organization ID cannot be empty.",
+		)
+		return
+	}
+	var organizationId = plan.OrganizationId.ValueString()
+
+	if plan.ProjectId.IsNull() {
+		// NOTE: this branch previously reported "organization ID cannot be
+		// empty" — fixed to name the field actually being validated.
+		resp.Diagnostics.AddError(
+			"Error creating Cluster",
+			"Could not create Cluster, unexpected error: project ID cannot be empty.",
+		)
+		return
+	}
+	var projectId = plan.ProjectId.ValueString()
+
+	response, err := c.Client.Execute(
+		fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters", c.HostURL, organizationId, projectId),
+		http.MethodPost,
+		ClusterRequest,
+		c.Token,
+		nil,
+	)
+	_, err = handleClusterError(err)
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Error creating cluster",
+			"Could not create cluster, unexpected error: "+err.Error(),
+		)
+		return
+	}
+
+	ClusterResponse := clusterapi.GetClusterResponse{}
+	err = json.Unmarshal(response.Body, &ClusterResponse)
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Error creating Cluster",
+			"Could not create Cluster, error during unmarshalling:"+err.Error(),
+		)
+		return
+	}
+
+	// Block until the cluster reaches a final state (see checkClusterStatus).
+	err = c.checkClusterStatus(ctx, organizationId, projectId, ClusterResponse.Id.String())
+	_, err = handleClusterError(err)
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Error creating cluster",
+			"Could not create cluster, unexpected error: "+err.Error(),
+		)
+		return
+	}
+
+	refreshedState, err := c.retrieveCluster(ctx, organizationId, projectId, ClusterResponse.Id.String())
+	_, err = handleClusterError(err)
+	if err != nil {
+		resp.Diagnostics.AddError(
+			"Error creating cluster",
+			"Could not create cluster, unexpected error: "+err.Error(),
+		)
+		return
+	}
+
+	// Keep the plan's service list ordering when it is semantically equal to
+	// the server's response. Guard against a length mismatch before indexing
+	// plan.ServiceGroups, mirroring the check done in Read.
+	if len(plan.ServiceGroups) == len(refreshedState.ServiceGroups) {
+		for i, serviceGroup := range refreshedState.ServiceGroups {
+			if clusterapi.AreEqual(plan.ServiceGroups[i].Services, serviceGroup.Services) {
+				refreshedState.ServiceGroups[i].Services = plan.ServiceGroups[i].Services
+			}
+		}
+	}
+
+	// need to have proper check since we are passing 7.1 and response is returning 7.1.5
+	c.populateInputServerVersionIfPresent(&plan, refreshedState)
+
+	// Set state to fully populated data
+	diags = resp.State.Set(ctx, refreshedState)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+}
+
+// Configure adds the provider-configured client data to the Cluster resource.
+func (c *Cluster) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + data, ok := req.ProviderData.(*providerschema.Data) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *ProviderSourceData, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + c.Data = data +} + +// Read reads project information. +func (c *Cluster) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var state providerschema.Cluster + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + resourceIDs, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error reading cluster", + "Could not read cluster id "+state.Id.String()+" unexpected error: "+err.Error(), + ) + return + } + + var ( + organizationId = resourceIDs[providerschema.OrganizationId] + projectId = resourceIDs[providerschema.ProjectId] + clusterId = resourceIDs[providerschema.ClusterId] + ) + + // Get refreshed Cluster value from Capella + refreshedState, err := c.retrieveCluster(ctx, organizationId, projectId, clusterId) + resourceNotFound, err := handleClusterError(err) + if resourceNotFound { + tflog.Info(ctx, "resource doesn't exist in remote server removing resource from state file") + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + "Error reading cluster", + "Could not read cluster id "+state.Id.String()+": "+err.Error(), + ) + return + } + + if len(state.ServiceGroups) == len(refreshedState.ServiceGroups) { + for i, serviceGroup := range refreshedState.ServiceGroups { + if clusterapi.AreEqual(state.ServiceGroups[i].Services, serviceGroup.Services) { + refreshedState.ServiceGroups[i].Services = state.ServiceGroups[i].Services + } + } + } + + //need to have proper 
check since we are passing 7.1 and response is returning 7.1.5 + c.populateInputServerVersionIfPresent(&state, refreshedState) + + // Set refreshed state + diags = resp.State.Set(ctx, &refreshedState) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update updates the Cluster. +func (c *Cluster) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan, state providerschema.Cluster + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + + diags = req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + resourceIDs, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error updating cluster", + "Could not update cluster id "+state.Id.String()+" unexpected error: "+err.Error(), + ) + return + } + + var ( + organizationId = resourceIDs[providerschema.OrganizationId] + projectId = resourceIDs[providerschema.ProjectId] + clusterId = resourceIDs[providerschema.ClusterId] + ) + + if err := c.validateClusterUpdate(plan, state); err != nil { + resp.Diagnostics.AddError( + "Error updating cluster", + "Could not update cluster id "+state.Id.String()+" unexpected error: "+err.Error(), + ) + return + } + + ClusterRequest := clusterapi.UpdateClusterRequest{ + Description: plan.Description.ValueString(), + Name: plan.Name.ValueString(), + Support: clusterapi.Support{ + Plan: clusterapi.SupportPlan(plan.Support.Plan.ValueString()), + Timezone: clusterapi.SupportTimezone(plan.Support.Timezone.ValueString()), + }, + } + + serviceGroups, err := c.morphToApiServiceGroups(plan) + if err != nil { + resp.Diagnostics.AddError( + "Error updating cluster", + "Could not update cluster id "+state.Id.String()+": "+err.Error(), + ) + return + } + + ClusterRequest.ServiceGroups = serviceGroups + + var headers = make(map[string]string) + if !state.IfMatch.IsUnknown() && 
!state.IfMatch.IsNull() { + headers["If-Match"] = state.IfMatch.ValueString() + } + + // Update existing Cluster + _, err = c.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters/%s", c.HostURL, organizationId, projectId, clusterId), + http.MethodPut, + ClusterRequest, + c.Token, + headers, + ) + _, err = handleClusterError(err) + if err != nil { + resp.Diagnostics.AddError( + "Error updating cluster", + "Could not update cluster id "+state.Id.String()+": "+err.Error(), + ) + return + } + + err = c.checkClusterStatus(ctx, organizationId, projectId, clusterId) + _, err = handleClusterError(err) + if err != nil { + resp.Diagnostics.AddError( + "Error updating cluster", + "Could not update cluster id "+state.Id.String()+": "+err.Error(), + ) + return + } + + currentState, err := c.retrieveCluster(ctx, organizationId, projectId, clusterId) + _, err = handleClusterError(err) + if err != nil { + resp.Diagnostics.AddError( + "Error updating cluster", + "Could not update cluster id "+state.Id.String()+": "+err.Error(), + ) + return + } + + if !plan.IfMatch.IsUnknown() && !plan.IfMatch.IsNull() { + currentState.IfMatch = plan.IfMatch + } + + for i, serviceGroup := range currentState.ServiceGroups { + if clusterapi.AreEqual(plan.ServiceGroups[i].Services, serviceGroup.Services) { + currentState.ServiceGroups[i].Services = plan.ServiceGroups[i].Services + } + } + + //need to have proper check since we are passing 7.1 and response is returning 7.1.5 + c.populateInputServerVersionIfPresent(&state, currentState) + // Set state to fully populated data + diags = resp.State.Set(ctx, currentState) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the project. +func (r *Cluster) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Retrieve values from state + var state providerschema.Cluster + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + resourceIDs, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error deleting cluster", + "Could not delete cluster id "+state.Id.String()+" unexpected error: "+err.Error(), + ) + return + } + + var ( + organizationId = resourceIDs[providerschema.OrganizationId] + projectId = resourceIDs[providerschema.ProjectId] + clusterId = resourceIDs[providerschema.ClusterId] + ) + + // Delete existing Cluster + _, err = r.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters/%s", r.HostURL, organizationId, projectId, clusterId), + http.MethodDelete, + nil, + r.Token, + nil, + ) + resourceNotFound, err := handleClusterError(err) + if resourceNotFound { + tflog.Info(ctx, "resource doesn't exist in remote server removing resource from state file") + return + } + if err != nil { + resp.Diagnostics.AddError( + "Error deleting cluster", + "Could not delete cluster id "+state.Id.String()+": "+err.Error(), + ) + return + } + + err = r.checkClusterStatus(ctx, state.OrganizationId.ValueString(), state.ProjectId.ValueString(), state.Id.ValueString()) + resourceNotFound, err = handleClusterError(err) + switch err { + case nil: + // This case will only occur when cluster deletion has failed, + // and the cluster record still exists in the cp metadata. Therefore, + // no error will be returned when performing a GET call. 
+ cluster, err := r.retrieveCluster(ctx, state.OrganizationId.ValueString(), state.ProjectId.ValueString(), state.Id.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error deleting cluster", + fmt.Sprintf("Could not delete cluster id %s: %s", state.Id.String(), err.Error()), + ) + return + } + resp.Diagnostics.AddError( + "Error deleting cluster", + fmt.Sprintf("Could not delete cluster id %s, as current Cluster state: %s", state.Id.String(), cluster.CurrentState), + ) + return + default: + if !resourceNotFound { + resp.Diagnostics.AddError( + "Error deleting cluster", + "Could not delete cluster id "+state.Id.String()+": "+err.Error(), + ) + return + } + } +} + +// ImportState imports a remote cluster that is not created by Terraform. +func (c *Cluster) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// getCluster retrieves cluster information from the specified organization and project +// using the provided cluster ID by open-api call +func (c *Cluster) getCluster(organizationId, projectId, clusterId string) (*clusterapi.GetClusterResponse, error) { + response, err := c.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters/%s", c.HostURL, organizationId, projectId, clusterId), + http.MethodGet, + nil, + c.Token, + nil, + ) + if err != nil { + return nil, err + } + + clusterResp := clusterapi.GetClusterResponse{} + err = json.Unmarshal(response.Body, &clusterResp) + if err != nil { + return nil, err + } + clusterResp.Etag = response.Response.Header.Get("ETag") + return &clusterResp, nil +} + +// retrieveCluster retrieves cluster information for a specified organization, project, and cluster ID. 
+func (c *Cluster) retrieveCluster(ctx context.Context, organizationId, projectId, clusterId string) (*providerschema.Cluster, error) { + clusterResp, err := c.getCluster(organizationId, projectId, clusterId) + if err != nil { + return nil, err + } + + audit := providerschema.NewCouchbaseAuditData(clusterResp.Audit) + + auditObj, diags := types.ObjectValueFrom(ctx, audit.AttributeTypes(), audit) + if diags.HasError() { + return nil, errors.ErrUnableToConvertAuditData + } + + refreshedState, err := providerschema.NewCluster(clusterResp, organizationId, projectId, auditObj) + if err != nil { + return nil, err + } + return refreshedState, nil +} + +// checkClusterStatus monitors the status of a cluster creation, update and deletion operation for a specified +// organization, project, and cluster ID. It periodically fetches the cluster status using the `getCluster` +// function and waits until the cluster reaches a final state or until a specified timeout is reached. +// The function returns an error if the operation times out or encounters an error during status retrieval. 
+func (c *Cluster) checkClusterStatus(ctx context.Context, organizationId, projectId, ClusterId string) error { + var ( + clusterResp *clusterapi.GetClusterResponse + err error + ) + + // Assuming 60 minutes is the max time deployment takes, can change after discussion + const timeout = time.Minute * 60 + + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + + const sleep = time.Second * 3 + + timer := time.NewTimer(2 * time.Minute) + + for { + select { + case <-ctx.Done(): + const msg = "cluster creation status transition timed out after initiation" + return fmt.Errorf(msg) + + case <-timer.C: + clusterResp, err = c.getCluster(organizationId, projectId, ClusterId) + switch err { + case nil: + if clusterapi.IsFinalState(clusterResp.CurrentState) { + return nil + } + const msg = "waiting for cluster to complete the execution" + tflog.Info(ctx, msg) + default: + return err + } + timer.Reset(sleep) + } + } +} + +// morphToApiServiceGroups converts a provider cluster serviceGroups to an API-compatible list of service groups. 
+func (c *Cluster) morphToApiServiceGroups(plan providerschema.Cluster) ([]clusterapi.ServiceGroup, error) {
+	var newServiceGroups []clusterapi.ServiceGroup
+	for _, serviceGroup := range plan.ServiceGroups {
+		numOfNodes := int(serviceGroup.NumOfNodes.ValueInt64())
+		newServiceGroup := clusterapi.ServiceGroup{
+			Node: &clusterapi.Node{
+				Compute: clusterapi.Compute{
+					Ram: int(serviceGroup.Node.Compute.Ram.ValueInt64()),
+					Cpu: int(serviceGroup.Node.Compute.Cpu.ValueInt64()),
+				},
+			},
+			NumOfNodes: &numOfNodes,
+		}
+
+		// Disk configuration is provider-specific; translate per cloud type.
+		switch plan.CloudProvider.Type.ValueString() {
+		case string(clusterapi.Aws):
+			node := clusterapi.Node{}
+			diskAws := clusterapi.DiskAWS{
+				Type: clusterapi.DiskAWSType(serviceGroup.Node.Disk.Type.ValueString()),
+			}
+
+			// NOTE(review): serviceGroup.Node is already dereferenced above,
+			// so the nil checks below are redundant but kept for safety.
+			if serviceGroup.Node != nil && !serviceGroup.Node.Disk.Storage.IsNull() {
+				diskAws.Storage = int(serviceGroup.Node.Disk.Storage.ValueInt64())
+			}
+
+			if serviceGroup.Node != nil && !serviceGroup.Node.Disk.IOPS.IsNull() {
+				diskAws.Iops = int(serviceGroup.Node.Disk.IOPS.ValueInt64())
+			}
+
+			err := node.FromDiskAWS(diskAws)
+			if err != nil {
+				return nil, err
+			}
+			newServiceGroup.Node.Disk = node.Disk
+
+		case string(clusterapi.Azure):
+			node := clusterapi.Node{}
+			diskAzure := clusterapi.DiskAzure{
+				Type: clusterapi.DiskAzureType(serviceGroup.Node.Disk.Type.ValueString()),
+			}
+
+			if serviceGroup.Node != nil && !serviceGroup.Node.Disk.Storage.IsNull() && !serviceGroup.Node.Disk.Storage.IsUnknown() {
+				storage := int(serviceGroup.Node.Disk.Storage.ValueInt64())
+				diskAzure.Storage = &storage
+			}
+
+			// Fixed copy-paste bug: this condition previously tested
+			// Storage.IsUnknown() instead of IOPS.IsUnknown(), which could
+			// send an unknown IOPS value in the request.
+			if serviceGroup.Node != nil && !serviceGroup.Node.Disk.IOPS.IsNull() && !serviceGroup.Node.Disk.IOPS.IsUnknown() {
+				iops := int(serviceGroup.Node.Disk.IOPS.ValueInt64())
+				diskAzure.Iops = &iops
+			}
+			if err := node.FromDiskAzure(diskAzure); err != nil {
+				return nil, err
+			}
+			newServiceGroup.Node.Disk = node.Disk
+
+		case string(clusterapi.Gcp):
+			storage := int(serviceGroup.Node.Disk.Storage.ValueInt64())
+			node := clusterapi.Node{}
+			err := node.FromDiskGCP(clusterapi.DiskGCP{
+				Type:    clusterapi.DiskGCPType(serviceGroup.Node.Disk.Type.ValueString()),
+				Storage: storage,
+			})
+			if err != nil {
+				return nil, err
+			}
+			newServiceGroup.Node.Disk = node.Disk
+		}
+		var newServices []clusterapi.Service
+		for _, service := range serviceGroup.Services {
+			newService := service.ValueString()
+			newServices = append(newServices, clusterapi.Service(newService))
+		}
+		newServiceGroup.Services = &newServices
+		newServiceGroups = append(newServiceGroups, newServiceGroup)
+	}
+	return newServiceGroups, nil
+}
+
+// populateInputServerVersionIfPresent copies the user-supplied server version
+// back into the refreshed state, because the API echoes a more specific patch
+// version (e.g. request 7.1, response 7.1.5) which would otherwise cause a
+// perpetual diff. A proper semantic-version comparison is still TODO.
+func (c *Cluster) populateInputServerVersionIfPresent(stateOrPlanCluster *providerschema.Cluster, refreshStateCluster *providerschema.Cluster) {
+	if stateOrPlanCluster.CouchbaseServer != nil &&
+		refreshStateCluster.CouchbaseServer != nil &&
+		!stateOrPlanCluster.CouchbaseServer.Version.IsNull() &&
+		!stateOrPlanCluster.CouchbaseServer.Version.IsUnknown() {
+		refreshStateCluster.CouchbaseServer.Version = stateOrPlanCluster.CouchbaseServer.Version
+	}
+}
+
+// validateClusterUpdate checks if specific fields in a cluster can be updated and returns an error if not.
+func (c *Cluster) validateClusterUpdate(plan, state providerschema.Cluster) error { + var planOrganizationId, stateOrganizationId string + if !plan.OrganizationId.IsNull() { + planOrganizationId = plan.OrganizationId.ValueString() + } + + if !state.OrganizationId.IsNull() { + stateOrganizationId = state.OrganizationId.ValueString() + } + + if planOrganizationId != stateOrganizationId { + return errors.ErrUnableToUpdateOrganizationId + } + + var planProjectId, stateProjectId string + if !plan.ProjectId.IsNull() { + planProjectId = plan.ProjectId.ValueString() + } + + if !state.ProjectId.IsNull() { + stateProjectId = state.ProjectId.ValueString() + } + + if planProjectId != stateProjectId { + return errors.ErrUnableToUpdateProjectId + } + + var planCouchbaseServerVersion, stateCouchbaseServerVersion string + if plan.CouchbaseServer != nil && !plan.CouchbaseServer.Version.IsNull() { + planCouchbaseServerVersion = plan.CouchbaseServer.Version.ValueString() + } + if state.CouchbaseServer != nil && !state.CouchbaseServer.Version.IsNull() { + stateCouchbaseServerVersion = state.CouchbaseServer.Version.ValueString() + } + + if planCouchbaseServerVersion != stateCouchbaseServerVersion { + return errors.ErrUnableToUpdateServerVersion + } + + var planAvailabilityType, stateAvailabilityType string + if plan.Availability != nil && !plan.Availability.Type.IsNull() { + planAvailabilityType = plan.Availability.Type.ValueString() + } + if state.Availability != nil && !state.Availability.Type.IsNull() { + stateAvailabilityType = state.Availability.Type.ValueString() + } + + if planAvailabilityType != stateAvailabilityType { + return errors.ErrUnableToUpdateAvailabilityType + } + + var planCloudProvider, stateCloudProvider providerschema.CloudProvider + if plan.CloudProvider != nil { + planCloudProvider = *plan.CloudProvider + } + if state.CloudProvider != nil { + stateCloudProvider = *state.CloudProvider + } + + if !reflect.DeepEqual(planCloudProvider, stateCloudProvider) { + 
return errors.ErrUnableToUpdateCloudProvider + } + + return nil +} + +// this func extract error message if error is api.Error and also checks whether error is +// resource not found +func handleClusterError(err error) (bool, error) { + switch err := err.(type) { + case nil: + return false, nil + case api.Error: + if err.HttpStatusCode != http.StatusNotFound { + return false, fmt.Errorf(err.CompleteError()) + } + return true, fmt.Errorf(err.CompleteError()) + default: + return false, err + } +} diff --git a/internal/resources/cluster_schema.go b/internal/resources/cluster_schema.go new file mode 100644 index 00000000..47201190 --- /dev/null +++ b/internal/resources/cluster_schema.go @@ -0,0 +1,86 @@ +package resources + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" +) + +func ClusterSchema() schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "organization_id": stringAttribute(required), + "project_id": stringAttribute(required), + "name": stringAttribute(required), + "description": stringAttribute(optional), + "cloud_provider": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "type": stringAttribute(required), + "region": stringAttribute(required), + "cidr": stringAttribute(required), + }, + }, + "couchbase_server": schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + Attributes: map[string]schema.Attribute{ + "version": stringAttribute(optional, computed), + }, + }, + "service_groups": schema.ListNestedAttribute{ + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "node": 
schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "compute": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "cpu": int64Attribute(required), + "ram": int64Attribute(required), + }, + }, + "disk": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "type": stringAttribute(required), + "storage": int64Attribute(optional, computed), + "iops": int64Attribute(optional, computed), + }, + }, + }, + }, + "num_of_nodes": int64Attribute(required), + "services": stringListAttribute(required), + }, + }, + }, + "availability": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "type": stringAttribute(required), + }, + }, + "support": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "plan": stringAttribute(required), + "timezone": stringAttribute(required), + }, + }, + "current_state": stringAttribute(computed), + "app_service_id": stringAttribute(optional, computed), + "audit": computedAuditAttribute(), + "if_match": stringAttribute(optional), + "etag": stringAttribute(computed), + }, + } +} diff --git a/internal/resources/database_credential.go b/internal/resources/database_credential.go new file mode 100644 index 00000000..9b3ef698 --- /dev/null +++ b/internal/resources/database_credential.go @@ -0,0 +1,515 @@ +package resources + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "terraform-provider-capella/internal/api" + "terraform-provider-capella/internal/errors" + providerschema "terraform-provider-capella/internal/schema" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ resource.Resource = &DatabaseCredential{} + _ resource.ResourceWithConfigure = &DatabaseCredential{} + _ resource.ResourceWithImportState = &DatabaseCredential{} +) + +// DatabaseCredential is the database credential resource implementation. +type DatabaseCredential struct { + *providerschema.Data +} + +func NewDatabaseCredential() resource.Resource { + return &DatabaseCredential{} +} + +// Metadata returns the name that the database credential will follow in the terraform files. +// the name as per this function is capella_database_credential. +func (r *DatabaseCredential) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_database_credential" +} + +// Schema defines the schema for the database credential resource. +func (r *DatabaseCredential) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = DatabaseCredentialSchema() +} + +// Configure adds the provider configured client to the database credential resource. +func (r *DatabaseCredential) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + data, ok := req.ProviderData.(*providerschema.Data) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *ProviderSourceData, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + r.Data = data +} + +// Create creates a new database credential. This function will validate the mandatory fields in the resource.CreateRequest +// before invoking the Capella V4 API. +func (r *DatabaseCredential) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan providerschema.DatabaseCredential + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + if plan.OrganizationId.IsNull() { + resp.Diagnostics.AddError( + "Error creating database credential", + "Could not create database credential, unexpected error: "+errors.ErrOrganizationIdCannotBeEmpty.Error(), + ) + return + } + var organizationId = plan.OrganizationId.ValueString() + + if plan.ProjectId.IsNull() { + resp.Diagnostics.AddError( + "Error creating database credential", + "Could not create database credential, unexpected error: "+errors.ErrProjectIdCannotBeEmpty.Error(), + ) + return + } + var projectId = plan.ProjectId.ValueString() + + if plan.ClusterId.IsNull() { + resp.Diagnostics.AddError( + "Error creating database credential", + "Could not create database credential, unexpected error: "+errors.ErrClusterIdCannotBeEmpty.Error(), + ) + return + } + var clusterId = plan.ClusterId.ValueString() + + dbCredRequest := api.CreateDatabaseCredentialRequest{ + Name: plan.Name.ValueString(), + } + + if !plan.Password.IsNull() { + dbCredRequest.Password = plan.Password.ValueString() + } + + dbCredRequest.Access = createAccess(plan) + + response, err := r.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters/%s/users", r.HostURL, organizationId, projectId, clusterId), + http.MethodPost, + dbCredRequest, + r.Token, + nil, + ) + switch err := err.(type) { + case nil: + case api.Error: + resp.Diagnostics.AddError( + "Error creating database credential", + "Could not create database credential, unexpected error: "+err.CompleteError(), + ) + return + default: + resp.Diagnostics.AddError( + "Error creating database credential", + "Could not create database credential, unexpected error: "+err.Error(), + ) + return + } + + dbResponse := api.CreateDatabaseCredentialResponse{} + err = json.Unmarshal(response.Body, &dbResponse) + if err != nil { + resp.Diagnostics.AddError( + "Error creating database credential", + "Could not create database credential, unexpected error: "+err.Error(), + ) + 
return + } + + refreshedState, err := r.retrieveDatabaseCredential(ctx, organizationId, projectId, clusterId, dbResponse.Id.String()) + switch err := err.(type) { + case nil: + case api.Error: + resp.Diagnostics.AddError( + "Error Reading Capella Database Credentials", + "Could not read Capella database credential with ID "+dbResponse.Id.String()+": "+err.CompleteError(), + ) + return + default: + resp.Diagnostics.AddError( + "Error Reading Capella Database Credentials", + "Could not read Capella database credential with ID "+dbResponse.Id.String()+": "+err.Error(), + ) + return + } + + refreshedState.Password = types.StringValue(dbResponse.Password) + // store the password that was either auto-generated or supplied during credential creation request. + // todo: there is a bug in the V4 public APIs where the API returns the password in the response only if it is auto-generated. + // This will be fixed in AV-62867. + // For now, we are working around this issue. + if dbResponse.Password == "" { + // this means the customer had provided a password in the terraform file during creation, store that. + refreshedState.Password = plan.Password + } + + // todo: there is a bug in cp-open-api where the access field is empty in the GET API response, + // we are going to work around this for private preview. + // The fix will be done in SURF-7366 + // For now, we are appending same permissions that the customer passed in the terraform files and not relying on the GET API response. + refreshedState.Access = mapAccess(plan) + + // Set state to fully populated data + diags = resp.State.Set(ctx, refreshedState) + + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read reads database credential information. +func (r *DatabaseCredential) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var state providerschema.DatabaseCredential + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + dbId, clusterId, projectId, organizationId, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error Reading Database Credentials in Capella", + "Could not read Capella database credential with ID "+state.Id.String()+": "+err.Error(), + ) + return + } + + // Get refreshed Cluster value from Capella + refreshedState, err := r.retrieveDatabaseCredential(ctx, organizationId, projectId, clusterId, dbId) + resourceNotFound, err := handleDatabaseCredentialError(err) + if resourceNotFound { + tflog.Info(ctx, "resource doesn't exist in remote server removing resource from state file") + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + "Error reading database credential", + "Could not read database credential with id "+state.Id.String()+": "+err.Error(), + ) + return + } + + // if the user had provided the password in the input, we store that in the terraform state file. + refreshedState.Password = state.Password + + // todo: there is a bug in cp-open-api where the access field is empty in the GET API response, + // we are going to work around this for private preview. + // The fix will be done in SURF-7366 + // For now, we are appending same permissions that the customer passed in the terraform files and not relying on the GET API response. + refreshedState.Access = mapAccess(state) + + // Set refreshed state + diags = resp.State.Set(ctx, &refreshedState) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update updates the database credential. +func (r *DatabaseCredential) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var state providerschema.DatabaseCredential + diags := req.Plan.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
	if resp.Diagnostics.HasError() {
		return
	}

	dbId, clusterId, projectId, organizationId, err := state.Validate()
	if err != nil {
		resp.Diagnostics.AddError(
			"Error Reading Database Credentials in Capella",
			"Could not read Capella database credential with ID "+state.Id.String()+": "+err.Error(),
		)
		return
	}

	dbCredRequest := api.PutDatabaseCredentialRequest{
		// it is expected that the password in the state file will never be empty.
		Password: state.Password.ValueString(),
	}

	dbCredRequest.Access = createAccess(state)

	_, err = r.Client.Execute(
		fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters/%s/users/%s", r.HostURL, organizationId, projectId, clusterId, dbId),
		http.MethodPut,
		dbCredRequest,
		r.Token,
		nil,
	)
	switch err := err.(type) {
	case nil:
	case api.Error:
		resp.Diagnostics.AddError(
			"Error updating database credential",
			"Could not update an existing database credential, unexpected error: "+err.CompleteError(),
		)
		return
	default:
		resp.Diagnostics.AddError(
			"Error updating database credential",
			"Could not update database credential, unexpected error: "+err.Error(),
		)
		return
	}

	// Re-read the credential so the state includes server-computed fields.
	currentState, err := r.retrieveDatabaseCredential(ctx, organizationId, projectId, clusterId, dbId)
	switch err := err.(type) {
	case nil:
	case api.Error:
		resp.Diagnostics.AddError(
			"Error Reading Capella Database Credentials",
			"Could not read Capella database credential with ID "+dbId+": "+err.CompleteError(),
		)
		return
	default:
		resp.Diagnostics.AddError(
			"Error Reading Capella Database Credentials",
			"Could not read Capella database credential with ID "+dbId+": "+err.Error(),
		)
		return
	}

	// this will ensure that the state file stores the new updated password, if password is not to be updated, it will retain the older one.
	currentState.Password = state.Password

	// todo: there is a bug in cp-open-api where the access field is empty in the GET API response,
	// we are going to work around this for private preview.
	// The fix will be done in SURF-7366
	// For now, we are appending same permissions that the customer passed in the terraform files and not relying on the GET API response.
	currentState.Access = mapAccess(state)

	// Set state to fully populated data
	diags = resp.State.Set(ctx, currentState)

	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
}

// Delete deletes the database credential.
func (r *DatabaseCredential) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
	// Retrieve values from state
	var state providerschema.DatabaseCredential
	diags := req.State.Get(ctx, &state)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}

	dbId, clusterId, projectId, organizationId, err := state.Validate()
	if err != nil {
		resp.Diagnostics.AddError(
			"Error Reading Database Credentials in Capella",
			"Could not read Capella database credential with ID "+state.Id.String()+": "+err.Error(),
		)
		return
	}

	_, err = r.Client.Execute(
		fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters/%s/users/%s", r.HostURL, organizationId, projectId, clusterId, dbId),
		http.MethodDelete,
		nil,
		r.Token,
		nil,
	)
	switch err := err.(type) {
	case nil:
	case api.Error:
		// A 404 is not reported: the credential is already gone, so the delete is a no-op success.
		if err.HttpStatusCode != 404 {
			resp.Diagnostics.AddError(
				"Error Deleting the Database Credential",
				"Could not delete Database Credential associated with cluster "+clusterId+": "+err.CompleteError(),
			)
			return
		}
	default:
		resp.Diagnostics.AddError(
			"Error Deleting Database Credential",
			"Could not delete Database Credential associated with cluster "+clusterId+": "+err.Error(),
		)
		return
	}
}

// ImportState imports a remote database credential that is not created
by Terraform.
// Since Capella APIs may require multiple IDs, such as organizationId, projectId, clusterId,
// this function passes the root attribute which is a comma separated string of multiple IDs.
// example: id=user123,organization_id=org123,project_id=proj123,cluster_id=cluster123
// Unfortunately the terraform import CLI doesn't allow us to pass multiple IDs at this point
// and hence this workaround has been applied.
func (r *DatabaseCredential) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	// Retrieve import ID and save to id attribute
	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}

// retrieveDatabaseCredential fetches the database credential by making a GET API call to the Capella V4 Public API.
// This usually helps retrieve the state of a newly created database credential that was created from Terraform.
func (r *DatabaseCredential) retrieveDatabaseCredential(ctx context.Context, organizationId, projectId, clusterId, dbId string) (*providerschema.OneDatabaseCredential, error) {
	response, err := r.Client.Execute(
		fmt.Sprintf("%s/v4/organizations/%s/projects/%s/clusters/%s/users/%s", r.HostURL, organizationId, projectId, clusterId, dbId),
		http.MethodGet,
		nil,
		r.Token,
		nil,
	)
	if err != nil {
		return nil, err
	}

	dbResp := api.GetDatabaseCredentialResponse{}
	err = json.Unmarshal(response.Body, &dbResp)
	if err != nil {
		return nil, err
	}

	// Map the API response into the provider's state representation.
	refreshedState := providerschema.OneDatabaseCredential{
		Id:             types.StringValue(dbResp.Id.String()),
		Name:           types.StringValue(dbResp.Name),
		OrganizationId: types.StringValue(organizationId),
		ProjectId:      types.StringValue(projectId),
		ClusterId:      types.StringValue(clusterId),
		Audit: providerschema.CouchbaseAuditData{
			CreatedAt: types.StringValue(dbResp.Audit.CreatedAt.String()),
			CreatedBy: types.StringValue(dbResp.Audit.CreatedBy),
			ModifiedAt:
types.StringValue(dbResp.Audit.ModifiedAt.String()), + ModifiedBy: types.StringValue(dbResp.Audit.ModifiedBy), + Version: types.Int64Value(int64(dbResp.Audit.Version)), + }, + } + // todo: there is a bug in cp-open-api where the access field is empty in the GET API response, + // we are going to work around this for private preview. + // The fix will be done in SURF-7366 + // For now, we are appending same permissions that the customer passed in the terraform files and not relying on the GET API response. + // the below code will be uncommented once the bug is fixed. + /* for i, access := range dbResp.Access { + refreshedState.Access[i] = providerschema.Access{} + for _, permission := range access.Privileges { + refreshedState.Access[i].Privileges = append(refreshedState.Access[i].Privileges, types.StringValue(permission)) + } + } + */ + return &refreshedState, nil +} + +// this func extract error message if error is api.Error and also checks whether error is +// resource not found +func handleDatabaseCredentialError(err error) (bool, error) { + switch err := err.(type) { + case nil: + return false, nil + case api.Error: + if err.HttpStatusCode != http.StatusNotFound { + return false, fmt.Errorf(err.CompleteError()) + } + return true, fmt.Errorf(err.CompleteError()) + default: + return false, err + } +} + +// todo: add a unit test for this, tracking under: https://couchbasecloud.atlassian.net/browse/AV-63401 +func createAccess(input providerschema.DatabaseCredential) []api.Access { + var access = make([]api.Access, len(input.Access)) + + for i, acc := range input.Access { + access[i] = api.Access{Privileges: make([]string, len(acc.Privileges))} + for j, permission := range acc.Privileges { + access[i].Privileges[j] = permission.ValueString() + } + if acc.Resources != nil { + if acc.Resources.Buckets != nil { + access[i].Resources = &api.AccessibleResources{Buckets: make([]api.Bucket, len(acc.Resources.Buckets))} + for k, bucket := range acc.Resources.Buckets { + 
					access[i].Resources.Buckets[k].Name = acc.Resources.Buckets[k].Name.ValueString()
					if bucket.Scopes != nil {
						access[i].Resources.Buckets[k].Scopes = make([]api.Scope, len(bucket.Scopes))
						for s, scope := range bucket.Scopes {
							access[i].Resources.Buckets[k].Scopes[s].Name = scope.Name.ValueString()
							if scope.Collections != nil {
								access[i].Resources.Buckets[k].Scopes[s].Collections = make([]string, len(scope.Collections))
								for c, coll := range scope.Collections {
									access[i].Resources.Buckets[k].Scopes[s].Collections[c] = coll.ValueString()
								}
							}
						}
					}
				}
			}
		} else {
			// todo: There is a bug in the PUT V4 API where we cannot pass empty buckets list as it leads to a nil pointer exception.
			// to workaround this bug, I have temporarily added a fix where we pass an empty list of buckets if the terraform input field doesn't contain any buckets.
			// fix for the V4 API bug will come as part of https://couchbasecloud.atlassian.net/browse/AV-63388

			access[i].Resources = &api.AccessibleResources{Buckets: make([]api.Bucket, 0)}
		}
	}

	return access
}

// mapAccess needs a 1:1 mapping when we store the output as the refreshed state.
// NOTE(review): unlike createAccess, there is no else branch here, so when the plan
// has a nil Resources the state keeps a nil Resources — confirm this matches what
// callers expect once the SURF-7366 / AV-63388 workarounds are removed.
// todo: add a unit test, tracking under: https://couchbasecloud.atlassian.net/browse/AV-63401
func mapAccess(plan providerschema.DatabaseCredential) []providerschema.Access {
	var access = make([]providerschema.Access, len(plan.Access))

	for i, acc := range plan.Access {
		access[i] = providerschema.Access{Privileges: make([]types.String, len(acc.Privileges))}
		for j, permission := range acc.Privileges {
			access[i].Privileges[j] = permission
		}
		if acc.Resources != nil {
			if acc.Resources.Buckets != nil {
				access[i].Resources = &providerschema.Resources{Buckets: make([]providerschema.BucketResource, len(acc.Resources.Buckets))}
				for k, bucket := range acc.Resources.Buckets {
					access[i].Resources.Buckets[k].Name = acc.Resources.Buckets[k].Name
					if bucket.Scopes != nil {
						access[i].Resources.Buckets[k].Scopes = make([]providerschema.Scope, len(bucket.Scopes))
						for s, scope := range bucket.Scopes {
							access[i].Resources.Buckets[k].Scopes[s].Name = scope.Name
							if scope.Collections != nil {
								access[i].Resources.Buckets[k].Scopes[s].Collections = make([]types.String, len(scope.Collections))
								for c, coll := range scope.Collections {
									access[i].Resources.Buckets[k].Scopes[s].Collections[c] = coll
								}
							}
						}
					}
				}
			}
		}
	}

	return access
}
diff --git a/internal/resources/database_credential_schema.go b/internal/resources/database_credential_schema.go
new file mode 100644
index 00000000..d785c704
--- /dev/null
+++ b/internal/resources/database_credential_schema.go
package resources

import (
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
)

// DatabaseCredentialSchema defines the schema for the terraform provider resource - "DatabaseCredential".
// This terraform resource directly maps to the database credential created for a Capella cluster.
// DatabaseCredential resource supports Create, Destroy, Read, Import and List operations.
func DatabaseCredentialSchema() schema.Schema {
	return schema.Schema{
		Attributes: map[string]schema.Attribute{
			// "id" is computed by Capella; UseStateForUnknown avoids spurious
			// diffs after the credential has been created.
			"id": schema.StringAttribute{
				Computed: true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			"name":            stringAttribute(required),
			"password":        stringAttribute(optional, computed, sensitive),
			"organization_id": stringAttribute(required),
			"project_id":      stringAttribute(required),
			"cluster_id":      stringAttribute(required),
			"audit":           computedAuditAttribute(),
			// "access" mirrors the nested bucket/scope/collection structure of the
			// Capella V4 API request payload.
			"access": schema.ListNestedAttribute{
				Optional: true,
				NestedObject: schema.NestedAttributeObject{
					Attributes: map[string]schema.Attribute{
						"privileges": stringListAttribute(required),
						"resources": schema.SingleNestedAttribute{
							Optional: true,
							Attributes: map[string]schema.Attribute{
								"buckets": schema.ListNestedAttribute{
									Optional: true,
									NestedObject: schema.NestedAttributeObject{
										Attributes: map[string]schema.Attribute{
											"name": stringAttribute(required),
											"scopes": schema.ListNestedAttribute{
												Optional: true,
												NestedObject: schema.NestedAttributeObject{
													Attributes: map[string]schema.Attribute{
														"name":        stringAttribute(required),
														"collections": stringListAttribute(optional),
													},
												},
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
}
diff --git a/internal/resources/project.go b/internal/resources/project.go
new file mode 100644
index 00000000..bf1d0e79
--- /dev/null
+++ b/internal/resources/project.go
package resources

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/hashicorp/terraform-plugin-log/tflog"

	"terraform-provider-capella/internal/api"
	providerschema "terraform-provider-capella/internal/schema"

	"github.com/hashicorp/terraform-plugin-framework/path"
	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ resource.Resource                = &Project{}
	_ resource.ResourceWithConfigure   = &Project{}
	_ resource.ResourceWithImportState = &Project{}
)

// Project is the project resource implementation.
type Project struct {
	*providerschema.Data
}

// NewProject returns a new project resource instance.
func NewProject() resource.Resource {
	return &Project{}
}

// Metadata returns the project resource type name.
func (r *Project) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_project"
}

// Schema defines the schema for the project resource.
func (r *Project) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = ProjectSchema()
}

// Configure adds the provider configured client to the project resource.
func (r *Project) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	if req.ProviderData == nil {
		return
	}

	data, ok := req.ProviderData.(*providerschema.Data)
	if !ok {
		resp.Diagnostics.AddError(
			"Unexpected Resource Configure Type",
			fmt.Sprintf("Expected *ProviderSourceData, got: %T. Please report this issue to the provider developers.", req.ProviderData),
		)

		return
	}

	r.Data = data
}

// Create creates a new project.
func (r *Project) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	var plan providerschema.Project
	diags := req.Plan.Get(ctx, &plan)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}

	if plan.OrganizationId.IsNull() {
		resp.Diagnostics.AddError(
			"Error creating project",
			"Could not create project, unexpected error: organization ID cannot be empty.",
		)
		return
	}
	var organizationId = plan.OrganizationId.ValueString()

	projectRequest := api.CreateProjectRequest{
		Description: plan.Description.ValueString(),
		Name:        plan.Name.ValueString(),
	}

	response, err := r.Client.Execute(
		fmt.Sprintf("%s/v4/organizations/%s/projects", r.HostURL, organizationId),
		http.MethodPost,
		projectRequest,
		r.Token,
		nil,
	)
	switch err := err.(type) {
	case nil:
	case api.Error:
		resp.Diagnostics.AddError(
			"Error creating project",
			"Could not create project, unexpected error: "+err.CompleteError(),
		)
		return
	default:
		resp.Diagnostics.AddError(
			"Error creating project",
			"Could not create project, unexpected error: "+err.Error(),
		)
		return
	}

	projectResponse := api.GetProjectResponse{}
	err = json.Unmarshal(response.Body, &projectResponse)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error creating project",
			"Could not create project, unexpected error: "+err.Error(),
		)
		return
	}

	// Re-read the project so the state includes server-computed fields (audit, etag).
	refreshedState, err := r.retrieveProject(ctx, organizationId, projectResponse.Id.String())
	switch err := err.(type) {
	case nil:
	case api.Error:
		resp.Diagnostics.AddError(
			"Error Reading Capella Projects",
			"Could not read Capella project ID "+projectResponse.Id.String()+": "+err.CompleteError(),
		)
		return
	default:
		resp.Diagnostics.AddError(
			"Error Reading Capella Projects",
			"Could not read Capella project ID "+projectResponse.Id.String()+": "+err.Error(),
		)
		return
	}

	// Set state to fully populated data
	diags = resp.State.Set(ctx, refreshedState)

	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
}

// Read reads project information.
func (r *Project) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	// Get current state
	var state providerschema.Project
	diags := req.State.Get(ctx, &state)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}

	projectId, organizationId, err := state.Validate()
	if err != nil {
		resp.Diagnostics.AddError(
			"Error Reading Capella Projects",
			"Could not read Capella project ID "+state.Id.String()+": "+err.Error(),
		)
		return
	}

	// Get refreshed project value from Capella
	refreshedState, err := r.retrieveProject(ctx, organizationId, projectId)
	switch err := err.(type) {
	case nil:
	case api.Error:
		if err.HttpStatusCode != 404 {
			resp.Diagnostics.AddError(
				"Error Reading Capella Projects",
				"Could not read Capella project ID "+projectId+": "+err.CompleteError(),
			)
			return
		}
		// A 404 means the project was deleted outside Terraform; drop it from state.
		tflog.Info(ctx, "resource doesn't exist in remote server removing resource from state file")
		resp.State.RemoveResource(ctx)
		return
	default:
		resp.Diagnostics.AddError(
			"Error Reading Capella Projects",
			"Could not read Capella project ID "+projectId+": "+err.Error(),
		)
		return
	}

	// Set refreshed state
	diags = resp.State.Set(ctx, &refreshedState)
	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
}

// Update updates the project.
func (r *Project) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
	var state providerschema.Project
	diags := req.Plan.Get(ctx, &state)
	resp.Diagnostics.Append(diags...)

	if resp.Diagnostics.HasError() {
		return
	}

	projectId, organizationId, err := state.Validate()
	if err != nil {
		resp.Diagnostics.AddError(
			"Error Updating Capella Project",
			"Could not update Capella project ID "+state.Id.String()+": "+err.Error(),
		)
		return
	}

	projectRequest := api.PutProjectRequest{
		Description: state.Description.ValueString(),
		Name:        state.Name.ValueString(),
	}

	// Forward the optional If-Match header for optimistic concurrency control.
	var headers = make(map[string]string)
	if !state.IfMatch.IsUnknown() && !state.IfMatch.IsNull() {
		headers["If-Match"] = state.IfMatch.ValueString()
	}

	_, err = r.Client.Execute(
		fmt.Sprintf("%s/v4/organizations/%s/projects/%s", r.HostURL, organizationId, projectId),
		http.MethodPut,
		projectRequest,
		r.Token,
		headers,
	)
	switch err := err.(type) {
	case nil:
	case api.Error:
		resp.Diagnostics.AddError(
			"Error Updating Capella Projects",
			"Could not update Capella project ID "+state.Id.String()+": "+err.CompleteError(),
		)
		return
	default:
		resp.Diagnostics.AddError(
			"Error Updating Capella Projects",
			"Could not update Capella project ID "+state.Id.String()+": "+err.Error(),
		)
		return
	}

	// Re-read the project so the state includes the post-update server state.
	currentState, err := r.retrieveProject(ctx, organizationId, projectId)
	switch err := err.(type) {
	case nil:
	case api.Error:
		if err.HttpStatusCode != 404 {
			resp.Diagnostics.AddError(
				"Error Reading Capella Projects",
				"Could not read Capella project ID "+state.Id.String()+": "+err.CompleteError(),
			)
			return
		}
		tflog.Info(ctx, "resource doesn't exist in remote server removing resource from state file")
		resp.State.RemoveResource(ctx)
		return
	default:
		resp.Diagnostics.AddError(
			"Error Reading Capella Projects",
			"Could not read Capella project ID "+state.Id.String()+": "+err.Error(),
		)
		return
	}

	// Preserve the user-supplied If-Match value; it is not returned by the API.
	if !state.IfMatch.IsUnknown() && !state.IfMatch.IsNull() {
		currentState.IfMatch = state.IfMatch
	}

	// Set state to fully populated data
	diags = resp.State.Set(ctx, currentState)
resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the project. +func (r *Project) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Retrieve values from state + var state providerschema.Project + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + projectId, organizationId, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error Updating Capella Project", + "Could not update Capella project ID "+state.Id.String()+": "+err.Error(), + ) + return + } + + _, err = r.Client.Execute( + fmt.Sprintf("%s/v4/organizations/%s/projects/%s", r.HostURL, organizationId, projectId), + http.MethodDelete, + nil, + r.Token, + nil, + ) + switch err := err.(type) { + case nil: + case api.Error: + if err.HttpStatusCode != 404 { + resp.Diagnostics.AddError( + "Error Deleting Capella Projects", + "Could not delete Capella project ID "+projectId+": "+err.CompleteError(), + ) + tflog.Info(ctx, "resource doesn't exist in remote server") + return + } + default: + resp.Diagnostics.AddError( + "Error Deleting Capella Projects", + "Could not delete Capella project ID "+projectId+": "+err.Error(), + ) + return + } +} + +// ImportState imports a remote project that is not created by Terraform. +// Since Capella APIs may require multiple IDs, such as organizationId, projectId, clusterId, +// this function passes the root attribute which is a comma separated string of multiple IDs. +// example: id=proj123,organization_id=org123 +// Unfortunately the terraform import CLI doesn't allow us to pass multiple IDs at this point +// and hence this workaround has been applied. 
func (r *Project) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
	// Retrieve import ID and save to id attribute
	resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
}

// retrieveProject fetches the project from the Capella V4 API and maps the
// response (including the ETag response header) into the provider state representation.
func (r *Project) retrieveProject(ctx context.Context, organizationId, projectId string) (*providerschema.OneProject, error) {
	response, err := r.Client.Execute(
		fmt.Sprintf("%s/v4/organizations/%s/projects/%s", r.HostURL, organizationId, projectId),
		http.MethodGet,
		nil,
		r.Token,
		nil,
	)
	if err != nil {
		return nil, err
	}

	projectResp := api.GetProjectResponse{}
	err = json.Unmarshal(response.Body, &projectResp)
	if err != nil {
		return nil, err
	}

	// The ETag arrives as a response header, not in the JSON body.
	projectResp.Etag = response.Response.Header.Get("ETag")

	refreshedState := providerschema.OneProject{
		Id:             types.StringValue(projectResp.Id.String()),
		OrganizationId: types.StringValue(organizationId),
		Name:           types.StringValue(projectResp.Name),
		Description:    types.StringValue(projectResp.Description),
		Audit: providerschema.CouchbaseAuditData{
			CreatedAt:  types.StringValue(projectResp.Audit.CreatedAt.String()),
			CreatedBy:  types.StringValue(projectResp.Audit.CreatedBy),
			ModifiedAt: types.StringValue(projectResp.Audit.ModifiedAt.String()),
			ModifiedBy: types.StringValue(projectResp.Audit.ModifiedBy),
			Version:    types.Int64Value(int64(projectResp.Audit.Version)),
		},
		Etag: types.StringValue(projectResp.Etag),
	}

	return &refreshedState, nil
}
diff --git a/internal/resources/project_schema.go b/internal/resources/project_schema.go
new file mode 100644
index 00000000..95ba24cb
--- /dev/null
+++ b/internal/resources/project_schema.go
package resources

import (
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
)

// ProjectSchema defines the schema for the terraform provider resource - "Project".
func ProjectSchema() schema.Schema {
	return schema.Schema{
		Attributes: map[string]schema.Attribute{
			// "id" is computed by Capella; UseStateForUnknown avoids spurious diffs.
			"id": schema.StringAttribute{
				Computed: true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			"organization_id": stringAttribute(required),
			"name":            stringAttribute(required),
			"description":     stringAttribute(optional),
			"if_match":        stringAttribute(optional),
			"etag":            stringAttribute(computed),
			"audit":           computedAuditAttribute(),
		},
	}
}
diff --git a/internal/resources/user.go b/internal/resources/user.go
new file mode 100644
index 00000000..0558e29a
--- /dev/null
+++ b/internal/resources/user.go
package resources

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"terraform-provider-capella/internal/api"
	"terraform-provider-capella/internal/errors"
	providerschema "terraform-provider-capella/internal/schema"

	"github.com/hashicorp/terraform-plugin-framework/path"
	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
	"github.com/hashicorp/terraform-plugin-log/tflog"
)

// Ensure the implementation satisfies the expected interfaces.
var (
	_ resource.Resource                = &User{}
	_ resource.ResourceWithConfigure   = &User{}
	_ resource.ResourceWithImportState = &User{}
)

// Keys used to look up the IDs returned by providerschema.User.Validate.
const (
	organizationIdKey = "organizationId"
	userIdKey         = "userId"
)

// User is the User resource implementation
type User struct {
	*providerschema.Data
}

// NewUser returns a new user resource instance.
func NewUser() resource.Resource {
	return &User{}
}

// Metadata returns the users resource type name
func (r *User) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_user"
}

// Schema defines the schema for the user resource.
func (r *User) Schema(ctx context.Context, rsc resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = UserSchema()
}

// Configure sets provider-defined data, clients, etc. that is passed to data sources or resources in the provider.
func (r *User) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
	if req.ProviderData == nil {
		return
	}

	data, ok := req.ProviderData.(*providerschema.Data)
	if !ok {
		resp.Diagnostics.AddError(
			"Unexpected Data Source Configure Type",
			fmt.Sprintf("Expected *ProviderSourceData, got: %T. Please report this issue to the provider developers.", req.ProviderData),
		)
		return
	}
	r.Data = data
}

// Create creates a new user
func (r *User) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	var plan providerschema.User
	diags := req.Plan.Get(ctx, &plan)
	resp.Diagnostics.Append(diags...)

	if resp.Diagnostics.HasError() {
		return
	}

	err := r.validateCreateUserRequest(plan)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error parsing create user request",
			"Could not create user "+err.Error(),
		)
		return
	}
	var organizationId = plan.OrganizationId.ValueString()

	createUserRequest := api.CreateUserRequest{
		Name:              plan.Name.ValueString(),
		Email:             plan.Email.ValueString(),
		OrganizationRoles: providerschema.ConvertOrganizationRoles(plan.OrganizationRoles),
		Resources:         providerschema.ConvertResources(plan.Resources),
	}

	// Execute request
	response, err := r.Client.Execute(
		fmt.Sprintf("%s/v4/organizations/%s/users", r.HostURL, organizationId),
		http.MethodPost,
		createUserRequest,
		r.Token,
		nil,
	)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error executing request",
			"Could not execute request, unexpected error: "+err.Error(),
		)
		return
	}

	createUserResponse := api.CreateUserResponse{}
	err = json.Unmarshal(response.Body, &createUserResponse)
	if err != nil {
		resp.Diagnostics.AddError(
			"Error creating user",
			"Could not create user, unexpected error: "+err.Error(),
		)
		return
	}

	// Re-read the user so the state includes server-computed fields.
	refreshedState, err := r.refreshUser(ctx, organizationId, createUserResponse.Id.String())
	if err != nil {
		resp.Diagnostics.AddError(
			"Error reading user",
			"Could not read user, unexpected error: "+err.Error(),
		)
		return
	}

	// Set state to fully populated data
	diags = resp.State.Set(ctx, refreshedState)

	resp.Diagnostics.Append(diags...)
	if resp.Diagnostics.HasError() {
		return
	}
}

// validateCreateUserRequest checks that the fields required to create a user are present.
func (r *User) validateCreateUserRequest(plan providerschema.User) error {
	if plan.OrganizationId.IsNull() {
		return errors.ErrOrganizationIdCannotBeEmpty
	}
	if plan.Email.IsNull() {
		return errors.ErrEmailCannotBeEmpty
	}
	if plan.OrganizationRoles == nil {
		return errors.ErrOrganizationRolesCannotBeEmpty
	}
	return nil
}

// Read reads user information
func (r *User) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
	var state providerschema.User
	diags := req.State.Get(ctx, &state)
	resp.Diagnostics.Append(diags...)
+ + if resp.Diagnostics.HasError() { + return + } + + // Validate parameters were successfully imported + resourceIDs, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error Reading Capella AllowList", + "Could not read Capella allow list: "+err.Error(), + ) + return + } + + var ( + organizationId = resourceIDs[organizationIdKey] + userId = resourceIDs[userIdKey] + ) + + // Refresh the existing user + refreshedState, err := r.refreshUser(ctx, organizationId, userId) + switch err := err.(type) { + case nil: + case api.Error: + if err.HttpStatusCode != http.StatusNotFound { + resp.Diagnostics.AddError( + "Error Reading Capella User", + "Could not read Capella userID "+userId+": "+err.CompleteError(), + ) + return + } + tflog.Info(ctx, "resource doesn't exist in remote server removing resource from state file") + resp.State.RemoveResource(ctx) + return + default: + resp.Diagnostics.AddError( + "Error Reading Capella User", + "Could not read Capella userID "+userId+": "+err.Error(), + ) + return + } + + // Set refreshed state + diags = resp.State.Set(ctx, &refreshedState) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} + +// Update updates the user +func (r *User) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Couchbase Capella's v4 does not support a PUT endpoint for users. + // Users are instead updated via a PATCH request. + // http://cbc-cp-api.s3-website-us-east-1.amazonaws.com/#tag/allowedCIDRs(Cluster) + // + // The update logic has been therefore been left blank. In this situation, terraform apply + // will default to deleting and executing a new create. 
+ // https://developer.hashicorp.com/terraform/plugin/framework/resources/update + // + // TODO (AV-63471): Implement logic to parse and execute a PATCH request +} + +// Delete deletes the user +func (r *User) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Retrieve existing state + var state providerschema.User + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + resourceIDs, err := state.Validate() + if err != nil { + resp.Diagnostics.AddError( + "Error Deleting Capella User", + "Could not delete Capella user: "+err.Error(), + ) + return + } + // Execute request to delete existing user + _, err = r.Client.Execute( + fmt.Sprintf( + "%s/v4/organizations/%s/users/%s", + r.HostURL, + resourceIDs[organizationIdKey], + resourceIDs[userIdKey], + ), + http.MethodDelete, + nil, + r.Token, + nil, + ) + switch err := err.(type) { + case nil: + case api.Error: + if err.HttpStatusCode != http.StatusNotFound { + resp.Diagnostics.AddError( + "Error Deleting Capella User", + "Could not delete Capella userId "+resourceIDs[userIdKey]+": "+err.CompleteError(), + ) + tflog.Info(ctx, "resource doesn't exist in remote server") + return + } + default: + resp.Diagnostics.AddError( + "Error Deleting Capella User", + "Could not delete Capella userId "+resourceIDs[userIdKey]+": "+err.Error(), + ) + return + } +} + +// getUser is used to retrieve an existing user +func (r *User) getUser(ctx context.Context, organizationId, userId string) (*api.GetUserResponse, error) { + response, err := r.Client.Execute( + fmt.Sprintf( + "%s/v4/organizations/%s/users/%s", + r.HostURL, + organizationId, + userId, + ), + http.MethodGet, + nil, + r.Token, + nil, + ) + if err != nil { + return nil, fmt.Errorf("%s: %v", errors.ErrExecutingRequest, err) + } + + userResp := api.GetUserResponse{} + err = json.Unmarshal(response.Body, &userResp) + if err != nil { + return nil, fmt.Errorf("%s: 
%v", errors.ErrUnmarshallingResponse, err) + } + return &userResp, nil +} + +func (r *User) refreshUser(ctx context.Context, organizationId, userId string) (*providerschema.User, error) { + userResp, err := r.getUser(ctx, organizationId, userId) + if err != nil { + return nil, handleCapellaUserError(err) + } + + audit := providerschema.NewCouchbaseAuditData(userResp.Audit) + auditObj, diags := types.ObjectValueFrom(ctx, audit.AttributeTypes(), audit) + if diags.HasError() { + return nil, errors.ErrUnableToConvertAuditData + } + + // Set optional fields - these may be left blank + var name basetypes.StringValue + if userResp.Name != nil { + name = types.StringValue(*userResp.Name) + } + + refreshedState := providerschema.NewUser( + types.StringValue(userResp.Id.String()), + name, + types.StringValue(userResp.Email), + types.StringValue(userResp.Status), + types.BoolValue(userResp.Inactive), + types.StringValue(userResp.OrganizationId.String()), + providerschema.MorphOrganizationRoles(userResp.OrganizationRoles), + types.StringValue(userResp.LastLogin), + types.StringValue(userResp.Region), + types.StringValue(userResp.TimeZone), + types.BoolValue(userResp.EnableNotifications), + types.StringValue(userResp.ExpiresAt), + providerschema.MorphResources(userResp.Resources), + auditObj, + ) + return refreshedState, nil +} + +// ImportState imports a remote user that was not created by Terraform. +// Since Capella APIs may require multiple IDs, such as organizationId, projectId, clusterId, +// this function passes the root attribute which is a comma separated string of multiple IDs. +// example: id=cluster123,project_id=proj123,organization_id=org123 +// Unfortunately the terraform import CLI doesn't allow us to pass multiple IDs at this point +// and hence this workaround has been applied. 
+func (r *User) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// handleCapellaUserError is used to differentiate between error types which +// may be returned during requests to capella. +func handleCapellaUserError(err error) error { + switch err := err.(type) { + case nil: + case api.Error: + return fmt.Errorf("%w: %s", errors.ErrUnableToReadCapellaUser, err.CompleteError()) + default: + return fmt.Errorf("%w: %s", errors.ErrUnableToReadCapellaUser, err.Error()) + } + return nil +} diff --git a/internal/resources/user_schema.go b/internal/resources/user_schema.go new file mode 100644 index 00000000..2c09f63c --- /dev/null +++ b/internal/resources/user_schema.go @@ -0,0 +1,35 @@ +package resources + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" +) + +func UserSchema() schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": stringAttribute(computed, requiresReplace), + "name": stringAttribute(optional, requiresReplace), + "status": stringAttribute(computed, requiresReplace), + "inactive": boolAttribute(computed, requiresReplace), + "email": stringAttribute(required, requiresReplace), + "organization_id": stringAttribute(required, requiresReplace), + "organization_roles": stringListAttribute(required, requiresReplace), + "last_login": stringAttribute(computed, requiresReplace), + "region": stringAttribute(computed, requiresReplace), + "time_zone": stringAttribute(computed, requiresReplace), + "enable_notifications": boolAttribute(computed, requiresReplace), + "expires_at": stringAttribute(computed, requiresReplace), + "resources": schema.ListNestedAttribute{ + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "type": stringAttribute(optional, requiresReplace), + 
"id": stringAttribute(required, requiresReplace), + "roles": stringListAttribute(required, requiresReplace), + }, + }, + }, + "audit": computedAuditAttribute(), + }, + } +} diff --git a/internal/schema/allowlist.go b/internal/schema/allowlist.go new file mode 100644 index 00000000..7baeaf69 --- /dev/null +++ b/internal/schema/allowlist.go @@ -0,0 +1,160 @@ +package schema + +import ( + "fmt" + "strings" + "terraform-provider-capella/internal/errors" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// AllowList maps AllowList resource schema data +type AllowList struct { + // Cidr represents the trusted CIDR to allow the database connections from. + Cidr types.String `tfsdk:"cidr"` + + // Comment is a short description of the allowed CIDR. + Comment types.String `tfsdk:"comment"` + + // ExpiresAt is an RFC3339 timestamp determining when the allowed CIDR should expire. + ExpiresAt types.String `tfsdk:"expires_at"` + + // Id is a GUID4 identifier of the allowlist. + Id types.String `tfsdk:"id"` + + // OrganizationId is the organizationId of the capella tenant. + OrganizationId types.String `tfsdk:"organization_id"` + + // ProjectId is the projectId of the capella tenant. + ProjectId types.String `tfsdk:"project_id"` + + // ClusterId is the clusterId of the capella tenant. + ClusterId types.String `tfsdk:"cluster_id"` + + IfMatch types.String `tfsdk:"if_match"` + + // Audit represents all audit-related fields. It is of types.Object type to avoid conversion error for a nested field. + Audit types.Object `tfsdk:"audit"` +} + +// AllowLists defines the model for GetAllowLists. +type AllowLists struct { + // OrganizationId is the organizationId of the capella. + OrganizationId types.String `tfsdk:"organization_id"` + + // ProjectId is the projectId of the capella tenant. + ProjectId types.String `tfsdk:"project_id"` + + // ClusterId is the clusterId of the capella tenant. 
+ ClusterId types.String `tfsdk:"cluster_id"` + + // Data contains the list of resources. + Data []OneAllowList `tfsdk:"data"` +} + +// Validate is used to verify that IDs have been properly imported +func (a *AllowList) Validate() (map[string]string, error) { + const idDelimiter = "," + var found bool + + organizationId := a.OrganizationId.ValueString() + projectId := a.ProjectId.ValueString() + clusterId := a.ClusterId.ValueString() + allowListId := a.Id.ValueString() + + // check if the id is a comma separated string of multiple IDs, usually passed during the terraform import CLI + if a.OrganizationId.IsNull() { + strs := strings.Split(a.Id.ValueString(), idDelimiter) + if len(strs) != 4 { + return nil, errors.ErrIdMissing + } + + _, allowListId, found = strings.Cut(strs[0], "id=") + if !found { + return nil, errors.ErrAllowListIdMissing + } + + _, organizationId, found = strings.Cut(strs[1], "organization_id=") + if !found { + return nil, errors.ErrOrganizationIdMissing + } + + _, projectId, found = strings.Cut(strs[2], "project_id=") + if !found { + return nil, errors.ErrProjectIdMissing + } + + _, clusterId, found = strings.Cut(strs[3], "cluster_id=") + if !found { + return nil, errors.ErrClusterIdMissing + } + } + + resourceIDs := a.generateResourceIdMap(organizationId, projectId, clusterId, allowListId) + + err := a.checkEmpty(resourceIDs) + if err != nil { + return nil, fmt.Errorf("%s: %v", errors.ErrUnableToImportResource, err) + } + + return resourceIDs, nil +} + +// generateResourceIdmap is used to populate a map with selected IDs +func (a *AllowList) generateResourceIdMap(organizationId, projectId, clusterId, allowListId string) map[string]string { + return map[string]string{ + "organizationId": organizationId, + "projectId": projectId, + "clusterId": clusterId, + "allowListId": allowListId, + } +} + +// checkEmpty is used to verify that a supplied resourceId map has been populated +func (a *AllowList) checkEmpty(resourceIdMap map[string]string) error 
{ + if resourceIdMap["allowListId"] == "" { + return errors.ErrAllowListIdCannotBeEmpty + } + + if resourceIdMap["clusterId"] == "" { + return errors.ErrClusterIdCannotBeEmpty + } + + if resourceIdMap["projectId"] == "" { + return errors.ErrProjectIdCannotBeEmpty + } + + if resourceIdMap["organizationId"] == "" { + return errors.ErrOrganizationIdCannotBeEmpty + } + return nil +} + +// OneAllowList maps allowlist resource schema data; there is a separate response object to avoid conversion error for nested fields. +type OneAllowList struct { + // Audit represents all audit-related fields. + Audit CouchbaseAuditData `tfsdk:"audit"` + + // Cidr is the trusted CIDR to allow the database connections from. + Cidr types.String `tfsdk:"cidr"` + + // Comment is a short description of the allowed CIDR. + Comment types.String `tfsdk:"comment"` + + // ExpiresAt is an RFC3339 timestamp determining when the allowed CIDR should expire. + ExpiresAt types.String `tfsdk:"expires_at"` + + // Id is a GUID4 identifier of the project. + Id types.String `tfsdk:"id"` + + // OrganizationId is he organizationId of the capella. + OrganizationId types.String `tfsdk:"organization_id"` + + // ProjectId is the projectId of the capella tenant. + ProjectId types.String `tfsdk:"project_id"` + + // ClusterId is the clusterId of the capella tenant. 
+ ClusterId types.String `tfsdk:"cluster_id"` + + IfMatch types.String `tfsdk:"if_match"` +} diff --git a/internal/schema/apikey.go b/internal/schema/apikey.go new file mode 100644 index 00000000..eae73d7d --- /dev/null +++ b/internal/schema/apikey.go @@ -0,0 +1,350 @@ +package schema + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + + "terraform-provider-capella/internal/api" + "terraform-provider-capella/internal/errors" +) + +// ApiKeyResourcesItems defines model for APIKeyResourcesItems. +type ApiKeyResourcesItems struct { + // Id is the id of the project. + Id types.String `tfsdk:"id"` + + // Roles is the Project Roles associated with the API key. + // To learn more about Project Roles, + // see [Project Roles](https://docs.couchbase.com/cloud/projects/project-roles.html). + Roles []types.String `tfsdk:"roles"` + + // Type is the type of the resource. + Type types.String `tfsdk:"type"` +} + +// ApiKey maps ApiKey resource schema data +type ApiKey struct { + // OrganizationId is the organizationId of the capella. + OrganizationId types.String `tfsdk:"organization_id"` + + // AllowedCIDRs is the list of inbound CIDRs for the API key. + // The system making a request must come from one of the allowed CIDRs. + AllowedCIDRs types.List `tfsdk:"allowed_cidrs"` + Audit types.Object `tfsdk:"audit"` + + // Description is the description for the API key. + Description types.String `tfsdk:"description"` + + // Expiry is the expiry of the API key in number of days. + // If set to -1, the token will not expire. + Expiry types.Float64 `tfsdk:"expiry"` + + // Id is the id is a unique identifier for an apiKey. + Id types.String `tfsdk:"id"` + + // Name is the name of the API key. + Name types.String `tfsdk:"name"` + + // OrganizationRoles are the organization level roles granted to the API key. 
+ OrganizationRoles []types.String `tfsdk:"organization_roles"` + + // Resources is the resources are the resource level permissions associated + // with the API key. To learn more about Organization Roles, see + // [Organization Roles](https://docs.couchbase.com/cloud/organizations/organization-user-roles.html). + Resources []ApiKeyResourcesItems `tfsdk:"resources"` + + // Rotate is set only when updating(rotating) the API key, + // and it should be set be set in incremental order from + // the previously set rotate value, ideally we should start. + // it from 1 when we are rotating for first time. + Rotate types.Number `tfsdk:"rotate"` + + // Secret associated with API key. One has to follow the secret key policy, + // such as allowed characters and a length of 64 characters. If this field + // is left empty, a secret will be auto-generated. + Secret types.String `tfsdk:"secret"` + + // Token is a confidential piece of information that is used to authorize + // requests made to v4 endpoints. 
+ Token types.String `tfsdk:"token"` +} + +// NewApiKey creates new apikey object +func NewApiKey(apiKey *api.GetApiKeyResponse, organizationId string, auditObject basetypes.ObjectValue) (*ApiKey, error) { + newApiKey := ApiKey{ + Id: types.StringValue(apiKey.Id), + OrganizationId: types.StringValue(organizationId), + Name: types.StringValue(apiKey.Name), + Description: types.StringValue(apiKey.Description), + Expiry: types.Float64Value(float64(apiKey.Expiry)), + Audit: auditObject, + } + + newAllowedCidrs, err := MorphAllowedCidrs(apiKey.AllowedCIDRs) + if err != nil { + return nil, err + } + + newApiKey.AllowedCIDRs = newAllowedCidrs + + newApiKey.OrganizationRoles = MorphApiKeyOrganizationRoles(apiKey.OrganizationRoles) + + newApiKey.Resources = MorphApiKeyResources(apiKey.Resources) + + return &newApiKey, nil +} + +// MorphAllowedCidrs is used to convert string list to basetypes.ListValue +// TODO : add unit testing +func MorphAllowedCidrs(allowedCIDRs []string) (basetypes.ListValue, error) { + var newAllowedCidr []attr.Value + for _, allowedCidr := range allowedCIDRs { + newAllowedCidr = append(newAllowedCidr, types.StringValue(allowedCidr)) + } + + newAllowedCidrs, diags := types.ListValue(types.StringType, newAllowedCidr) + if diags.HasError() { + return types.ListUnknown(types.StringType), fmt.Errorf("error while converting allowedcidrs") + } + + return newAllowedCidrs, nil +} + +// MorphApiKeyOrganizationRoles is used to convert nested organizationRoles from +// strings to terraform type.String. 
+// TODO : add unit testing +func MorphApiKeyOrganizationRoles(organizationRoles []string) []basetypes.StringValue { + var newOrganizationRoles []types.String + for _, organizationRole := range organizationRoles { + newOrganizationRoles = append(newOrganizationRoles, types.StringValue(string(organizationRole))) + } + return newOrganizationRoles +} + +// MorphApiKeyResources is used to covert nested resources from strings +// to terraform types.String +// TODO : add unit testing +func MorphApiKeyResources(resources api.Resources) []ApiKeyResourcesItems { + var newApiKeyResourcesItems []ApiKeyResourcesItems + for _, resource := range resources { + newResourceItem := ApiKeyResourcesItems{ + Id: types.StringValue(resource.Id.String()), + } + if resource.Type != nil { + newResourceItem.Type = types.StringValue(*resource.Type) + } + var newRoles []types.String + for _, role := range resource.Roles { + newRoles = append(newRoles, types.StringValue(string(role))) + } + newResourceItem.Roles = newRoles + newApiKeyResourcesItems = append(newApiKeyResourcesItems, newResourceItem) + } + return newApiKeyResourcesItems +} + +// Validate checks the validity of an API key and extracts associated IDs. 
+// TODO : add unit testing +func (a *ApiKey) Validate() (map[string]string, error) { + const idDelimiter = "," + var found bool + + organizationId := a.OrganizationId.ValueString() + apiKeyId := a.Id.ValueString() + + // check if the id is a comma separated string of multiple IDs, usually passed during the terraform import CLI + if a.OrganizationId.IsNull() { + strs := strings.Split(a.Id.ValueString(), idDelimiter) + if len(strs) != 2 { + return nil, errors.ErrIdMissing + } + + _, apiKeyId, found = strings.Cut(strs[0], "id=") + if !found { + return nil, errors.ErrApiKeyIdMissing + } + + _, organizationId, found = strings.Cut(strs[1], "organization_id=") + if !found { + return nil, errors.ErrOrganizationIdMissing + } + + } + + resourceIDs := a.generateResourceIdMap(organizationId, apiKeyId) + + err := a.checkEmpty(resourceIDs) + if err != nil { + return nil, fmt.Errorf("resource import unsuccessful: %s", err) + } + + return resourceIDs, nil +} + +// generateResourceIdMap is used to populate a map with selected IDs +func (a *ApiKey) generateResourceIdMap(organizationId, apiKeyId string) map[string]string { + return map[string]string{ + OrganizationId: organizationId, + ApiKeyId: apiKeyId, + } +} + +// checkEmpty is used to verify that a supplied resourceId map has been populated +func (a *ApiKey) checkEmpty(resourceIdMap map[string]string) error { + if resourceIdMap[ApiKeyId] == "" { + return errors.ErrApiKeyIdCannotBeEmpty + } + + if resourceIdMap[OrganizationId] == "" { + return errors.ErrOrganizationIdCannotBeEmpty + } + return nil +} + +// ApiKeys defines model for GetApiKeysResponse. +type ApiKeys struct { + // OrganizationId The organizationId of the capella. + OrganizationId types.String `tfsdk:"organization_id"` + + // Data It contains the list of resources. + Data []ApiKeyData `tfsdk:"data"` +} + +// ApiKeyData maps api key resource schema data +type ApiKeyData struct { + // OrganizationId is the organizationId of the capella. 
+ OrganizationId types.String `tfsdk:"organization_id"` + + // AllowedCIDRs represents the list of inbound CIDRs for the API key. + // The system making a request must come from one of the allowed CIDRs. + AllowedCIDRs types.List `tfsdk:"allowed_cidrs"` + Audit types.Object `tfsdk:"audit"` + + // Description is the description for the API key. + Description types.String `tfsdk:"description"` + + // Expiry is the expiry of the API key in number of days. + // If set to -1, the token will not expire. + Expiry types.Float64 `tfsdk:"expiry"` + + // Id is the id is a unique identifier for an apiKey. + Id types.String `tfsdk:"id"` + + // Name is the name of the API key. + Name types.String `tfsdk:"name"` + + // OrganizationRoles are the organization level roles granted to the API key. + OrganizationRoles []types.String `tfsdk:"organization_roles"` + + // Resources are the resource level permissions associated + // with the API key. To learn more about Organization Roles, see + // [Organization Roles](https://docs.couchbase.com/cloud/organizations/organization-user-roles.html). 
+ Resources []ApiKeyResourcesItems `tfsdk:"resources"` +} + +// NewApiKeyData creates a new apiKeyData object +func NewApiKeyData(apiKey *api.GetApiKeyResponse, organizationId string, auditObject basetypes.ObjectValue) (ApiKeyData, error) { + newApiKeyData := ApiKeyData{ + Id: types.StringValue(apiKey.Id), + OrganizationId: types.StringValue(organizationId), + Name: types.StringValue(apiKey.Name), + Description: types.StringValue(apiKey.Description), + Expiry: types.Float64Value(float64(apiKey.Expiry)), + Audit: auditObject, + } + + var newAllowedCidr []attr.Value + for _, allowedCidr := range apiKey.AllowedCIDRs { + newAllowedCidr = append(newAllowedCidr, types.StringValue(allowedCidr)) + } + + allowedCidrs, diags := types.ListValue(types.StringType, newAllowedCidr) + if diags.HasError() { + return ApiKeyData{}, fmt.Errorf("error while converting allowedcidrs") + } + + newApiKeyData.AllowedCIDRs = allowedCidrs + + var newOrganizationRoles []types.String + for _, organizationRole := range apiKey.OrganizationRoles { + newOrganizationRoles = append(newOrganizationRoles, types.StringValue(organizationRole)) + } + newApiKeyData.OrganizationRoles = newOrganizationRoles + + var newApiKeyResourcesItems []ApiKeyResourcesItems + for _, resource := range apiKey.Resources { + newResourceItem := ApiKeyResourcesItems{ + Id: types.StringValue(resource.Id.String()), + } + if resource.Type != nil { + newResourceItem.Type = types.StringValue(*resource.Type) + } + var newRoles []types.String + for _, role := range resource.Roles { + newRoles = append(newRoles, types.StringValue(role)) + } + newResourceItem.Roles = newRoles + newApiKeyResourcesItems = append(newApiKeyResourcesItems, newResourceItem) + } + newApiKeyData.Resources = newApiKeyResourcesItems + + return newApiKeyData, nil +} + +// Validate is used to verify that all the fields in the datasource +// have been populated. 
+func (a ApiKeys) Validate() (organizationId string, err error) { + if a.OrganizationId.IsNull() { + return "", errors.ErrOrganizationIdMissing + } + + return a.OrganizationId.ValueString(), nil +} + +// OrderList2 function to order list2 based on list1's Ids +func OrderList2(list1, list2 []ApiKeyResourcesItems) ([]ApiKeyResourcesItems, error) { + if len(list1) != len(list2) { + return nil, fmt.Errorf("returned resources is not same as in plan") + } + // Create a map from Id to APIKeyResourcesItems for list2 + idToItem := make(map[string]ApiKeyResourcesItems) + for _, item := range list2 { + idToItem[item.Id.ValueString()] = item + } + + // Create a new ordered list2 based on the order of list1's Ids + orderedList2 := make([]ApiKeyResourcesItems, len(list1)) + for i, item1 := range list1 { + orderedList2[i] = idToItem[item1.Id.ValueString()] + } + + if len(orderedList2) != len(list2) { + return nil, fmt.Errorf("returned resources is not same as in plan") + } + + return orderedList2, nil +} + +// AreEqual returns true if the two arrays contain the same elements, +// without any extra values, False otherwise. +func AreEqual[T comparable](array1 []T, array2 []T) bool { + if len(array1) != len(array2) { + return false + } + set1 := make(map[T]bool) + for _, element := range array1 { + set1[element] = true + } + + for _, element := range array2 { + if !set1[element] { + return false + } + } + + return len(set1) == len(array1) +} diff --git a/internal/schema/audit.go b/internal/schema/audit.go new file mode 100644 index 00000000..b9404d7b --- /dev/null +++ b/internal/schema/audit.go @@ -0,0 +1,52 @@ +package schema + +import ( + "terraform-provider-capella/internal/api" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// CouchbaseAuditData contains all audit-related fields. 
+type CouchbaseAuditData struct { + // CreatedAt The RFC3339 timestamp associated with when the resource was initially + // created. + CreatedAt types.String `tfsdk:"created_at"` + + // CreatedBy The user who created the resource; this will be a UUID4 ID for standard + // users and will be a string such as "internal-support" for internal + // Couchbase support users. + CreatedBy types.String `tfsdk:"created_by"` + + // ModifiedAt The RFC3339 timestamp associated with when the resource was last modified. + ModifiedAt types.String `tfsdk:"modified_at"` + + // ModifiedBy The user who last modified the resource; this will be a UUID4 ID for + // standard users and wilmal be a string such asas "internal-support" for + // internal Couchbase support users. + ModifiedBy types.String `tfsdk:"modified_by"` + + // Version The version of the document. This value is incremented each time the + // resource is modified. + Version types.Int64 `tfsdk:"version"` +} + +func (c CouchbaseAuditData) AttributeTypes() map[string]attr.Type { + return map[string]attr.Type{ + "created_at": types.StringType, + "created_by": types.StringType, + "modified_at": types.StringType, + "modified_by": types.StringType, + "version": types.Int64Type, + } +} + +func NewCouchbaseAuditData(audit api.CouchbaseAuditData) CouchbaseAuditData { + return CouchbaseAuditData{ + CreatedAt: types.StringValue(audit.CreatedAt.String()), + CreatedBy: types.StringValue(audit.CreatedBy), + ModifiedAt: types.StringValue(audit.ModifiedAt.String()), + ModifiedBy: types.StringValue(audit.ModifiedBy), + Version: types.Int64Value(int64(audit.Version)), + } +} diff --git a/internal/schema/bucket.go b/internal/schema/bucket.go new file mode 100644 index 00000000..a5320ced --- /dev/null +++ b/internal/schema/bucket.go @@ -0,0 +1,162 @@ +package schema + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" + "strings" + "terraform-provider-capella/internal/errors" +) + +type Bucket struct { + Id types.String 
`tfsdk:"id"` + + Name types.String `tfsdk:"name"` + + // OrganizationId is the ID of the organization to which the Capella cluster belongs. + // The database credential will be created for the cluster. + OrganizationId types.String `tfsdk:"organization_id"` + + // ProjectId is the ID of the project to which the Capella cluster belongs. + // The database credential will be created for the cluster. + ProjectId types.String `tfsdk:"project_id"` + + // ClusterId is the ID of the cluster for which the database credential needs to be created. + ClusterId types.String `tfsdk:"cluster_id"` + + Type types.String `tfsdk:"type"` + + StorageBackend types.String `tfsdk:"storage_backend"` + + MemoryAllocationInMb int `tfsdk:"memory_allocationinmb"` + + BucketConflictResolution types.String `tfsdk:"conflict_resolution"` + + DurabilityLevel types.String `tfsdk:"durability_level"` + + Replicas int `tfsdk:"replicas"` + + Flush bool `tfsdk:"flush"` + + TimeToLiveInSeconds int `tfsdk:"ttl"` + + EvictionPolicy types.String `tfsdk:"eviction_policy"` + + Stats types.Object `tfsdk:"stats"` +} + +type Stats struct { + ItemCount types.Int64 `tfsdk:"item_count"` + OpsPerSecond types.Int64 `tfsdk:"ops_per_second"` + DiskUsedInMib types.Int64 `tfsdk:"disk_used_in_mib"` + MemoryUsedInMib types.Int64 `tfsdk:"memory_used_in_mib"` +} + +type OneBucket struct { + Id types.String `tfsdk:"id"` + + Name types.String `tfsdk:"name"` + + // OrganizationId is the ID of the organization to which the Capella cluster belongs. + // The database credential will be created for the cluster. + OrganizationId types.String `tfsdk:"organization_id"` + + // ProjectId is the ID of the project to which the Capella cluster belongs. + // The database credential will be created for the cluster. + ProjectId types.String `tfsdk:"project_id"` + + // ClusterId is the ID of the cluster for which the database credential needs to be created. 
+ ClusterId types.String `tfsdk:"cluster_id"` + + Type types.String `tfsdk:"type"` + + StorageBackend types.String `tfsdk:"storage_backend"` + + MemoryAllocationInMb int `tfsdk:"memory_allocationinmb"` + + BucketConflictResolution types.String `tfsdk:"conflict_resolution"` + + DurabilityLevel types.String `tfsdk:"durability_level"` + + Replicas int `tfsdk:"replicas"` + + Flush bool `tfsdk:"flush"` + + TimeToLiveInSeconds int `tfsdk:"ttl"` + + EvictionPolicy types.String `tfsdk:"eviction_policy"` + + Stats *Stats `tfsdk:"stats"` +} + +// Validate will split the IDs by a delimiter i.e. comma , in case a terraform import CLI is invoked. +// The format of the terraform import CLI would include the IDs as follows - +// `terraform import capella_bucket.new_bucket id=,cluster_id=,project_id=,organization_id=` +func (c Bucket) Validate() (bucketId, clusterId, projectId, organizationId string, err error) { + + const ( + idDelimiter = "," + organizationIdSep = "organization_id=" + projectIdSep = "project_id=" + clusterIdSep = "cluster_id=" + bucketIdSep = "id=" + ) + + organizationId = c.OrganizationId.ValueString() + projectId = c.ProjectId.ValueString() + clusterId = c.ClusterId.ValueString() + bucketId = c.Id.ValueString() + var found bool + + // check if the id is a comma separated string of multiple IDs, usually passed during the terraform import CLI + if c.OrganizationId.IsNull() { + strs := strings.Split(c.Id.ValueString(), idDelimiter) + if len(strs) != 4 { + err = errors.ErrIdMissing + return + } + _, bucketId, found = strings.Cut(strs[0], bucketIdSep) + if !found { + err = errors.ErrDatabaseCredentialIdMissing + return + } + + _, clusterId, found = strings.Cut(strs[1], clusterIdSep) + if !found { + err = errors.ErrClusterIdMissing + return + } + + _, projectId, found = strings.Cut(strs[2], projectIdSep) + if !found { + err = errors.ErrProjectIdMissing + return + } + + _, organizationId, found = strings.Cut(strs[3], organizationIdSep) + if !found { + err = 
errors.ErrOrganizationIdMissing + return + } + } + + if bucketId == "" { + err = errors.ErrBucketIdCannotBeEmpty + return + } + + if clusterId == "" { + err = errors.ErrClusterIdCannotBeEmpty + return + } + + if projectId == "" { + err = errors.ErrProjectIdCannotBeEmpty + return + } + + if organizationId == "" { + err = errors.ErrOrganizationIdCannotBeEmpty + return + } + + return bucketId, clusterId, projectId, organizationId, nil +} diff --git a/internal/schema/certificate.go b/internal/schema/certificate.go new file mode 100644 index 00000000..750dbf67 --- /dev/null +++ b/internal/schema/certificate.go @@ -0,0 +1,17 @@ +package schema + +import "github.com/hashicorp/terraform-plugin-framework/types" + +type Certificate struct { + // OrganizationId is the organizationId of the capella. + OrganizationId types.String `tfsdk:"organization_id"` + + // ProjectId is the projectId of the capella tenant. + ProjectId types.String `tfsdk:"project_id"` + + // ClusterId is the clusterId of the capella tenant. + ClusterId types.String `tfsdk:"cluster_id"` + + // Certificate is the certificate of the cluster + Certificate types.String `tfsdk:"certificate"` +} diff --git a/internal/schema/cluster.go b/internal/schema/cluster.go new file mode 100644 index 00000000..35ca36f4 --- /dev/null +++ b/internal/schema/cluster.go @@ -0,0 +1,342 @@ +package schema + +import ( + "fmt" + "strings" + + clusterapi "terraform-provider-capella/internal/api/cluster" + "terraform-provider-capella/internal/errors" + + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +// Availability defines the type of Availability Zone configuration for a cluster resource. +// single type means the nodes in the cluster will all be deployed in a single availability +// zone in the cloud region. multi type means the nodes in the cluster will all be deployed +// in separate multiple availability zones in the cloud region. 
+type Availability struct { + // Type is the availability zone type, either 'single' or 'multi'. + Type types.String `tfsdk:"type"` +} + +// CloudProvider is the cloud provider where the cluster will be hosted. +// To learn more, see [Amazon Web Services](https://docs.couchbase.com/cloud/reference/aws.html). +type CloudProvider struct { + // Cidr is the cidr block for Cloud Provider. + Cidr types.String `tfsdk:"cidr"` + + // Region is the cloud provider region, e.g. 'us-west-2'. + // For information about supported regions, + // see [Amazon Web Services](https://docs.couchbase.com/cloud/reference/aws.html). + Region types.String `tfsdk:"region"` + + // Type is the cloud provider type, either 'AWS', 'GCP', or 'Azure'. + Type types.String `tfsdk:"type"` +} + +// Compute depicts the couchbase compute, following are the supported compute combinations +// for CPU and RAM for different cloud providers. To learn more, +// see [Amazon Web Services](https://docs.couchbase.com/cloud/reference/aws.html). +type Compute struct { + // Cpu depicts cpu units (cores). + Cpu types.Int64 `tfsdk:"cpu"` + + // Ram depicts ram units (GB). + Ram types.Int64 `tfsdk:"ram"` +} + +// CouchbaseServer defines model for CouchbaseServer. +type CouchbaseServer struct { + // Version is the version of the Couchbase Server to be installed in the cluster. + // Refer to documentation [here](https://docs.couchbase.com/cloud/clusters/upgrade-database.html#server-version-maintenance-support) + // for list of supported versions. + // The latest Couchbase Server version will be deployed by default. + Version types.String `tfsdk:"version"` +} + +// Service is the couchbase service to run on the node. +type Service string + +// ServiceGroup is the set of nodes that share the same disk, number of nodes and services. +type ServiceGroup struct { + Node *Node `tfsdk:"node"` + + // NumOfNodes is number of nodes. The minimum number of nodes for the cluster + // can be 3 and maximum can be 27 nodes. 
Additional service groups can have + // 2 nodes minimum and 24 nodes maximum. + NumOfNodes types.Int64 `tfsdk:"num_of_nodes"` + + // Services is the couchbase service to run on the node. + Services []types.String `tfsdk:"services"` +} + +// Node defines model for Node. +type Node struct { + // Compute Following are the supported compute combinations for CPU and RAM + // for different cloud providers. To learn more, see + // [Amazon Web Services](https://docs.couchbase.com/cloud/reference/aws.html). + Compute Compute `tfsdk:"compute"` + Disk Node_Disk `tfsdk:"disk"` +} + +// Node_Disk defines model for Node.Disk. +type Node_Disk struct { + Type types.String `tfsdk:"type"` + Storage types.Int64 `tfsdk:"storage"` + IOPS types.Int64 `tfsdk:"iops"` +} + +// Support defines model for Support. +type Support struct { + // Plan is the plan type, either 'Basic', 'Developer Pro', or 'Enterprise'. + Plan types.String `tfsdk:"plan"` + + // Timezone is the standard timezone for the cluster. + // Should be the TZ identifier. + Timezone types.String `tfsdk:"timezone"` +} + +// Cluster defines model for CreateClusterRequest. 
+type Cluster struct {
+	ClusterData
+
+	// Etag is the entity tag returned by the server, used for optimistic concurrency control.
+	Etag types.String `tfsdk:"etag"`
+
+	// IfMatch is the If-Match header value to send on update requests.
+	IfMatch types.String `tfsdk:"if_match"`
+}
+
+// NewCluster creates a new cluster object from the cluster API response.
+func NewCluster(cluster *clusterapi.GetClusterResponse, organizationId, projectId string, auditObject basetypes.ObjectValue) (*Cluster, error) {
+	newClusterData, err := NewClusterData(cluster, organizationId, projectId, auditObject)
+	if err != nil {
+		return nil, err
+	}
+	newCluster := Cluster{
+		ClusterData: *newClusterData,
+		Etag:        types.StringValue(cluster.Etag),
+	}
+	return &newCluster, nil
+}
+
+// morphToTerraformServiceGroups converts the service groups of the cluster API
+// response into their Terraform schema representation. The disk shape depends
+// on the cloud provider of the cluster; unknown providers yield an error.
+func morphToTerraformServiceGroups(cluster *clusterapi.GetClusterResponse) ([]ServiceGroup, error) {
+	var newServiceGroups []ServiceGroup
+	for _, serviceGroup := range cluster.ServiceGroups {
+		newServiceGroup := ServiceGroup{
+			Node: &Node{
+				Compute: Compute{
+					Ram: types.Int64Value(int64(serviceGroup.Node.Compute.Ram)),
+					Cpu: types.Int64Value(int64(serviceGroup.Node.Compute.Cpu)),
+				},
+			},
+		}
+
+		switch cluster.CloudProvider.Type {
+		case clusterapi.Aws:
+			awsDisk, err := serviceGroup.Node.AsDiskAWS()
+			if err != nil {
+				return nil, err
+			}
+			newServiceGroup.Node.Disk = Node_Disk{
+				Type:    types.StringValue(string(awsDisk.Type)),
+				Storage: types.Int64Value(int64(awsDisk.Storage)),
+				IOPS:    types.Int64Value(int64(awsDisk.Iops)),
+			}
+		case clusterapi.Azure:
+			azureDisk, err := serviceGroup.Node.AsDiskAzure()
+			if err != nil {
+				return nil, err
+			}
+
+			newServiceGroup.Node.Disk = Node_Disk{
+				Type:    types.StringValue(string(azureDisk.Type)),
+				Storage: types.Int64Value(int64(*azureDisk.Storage)),
+				IOPS:    types.Int64Value(int64(*azureDisk.Iops)),
+			}
+		case clusterapi.Gcp:
+			gcpDisk, err := serviceGroup.Node.AsDiskGCP()
+			if err != nil {
+				return nil, err
+			}
+			// GCP disks carry no IOPS field, so only type and storage are set.
+			newServiceGroup.Node.Disk = Node_Disk{
+				Type:    types.StringValue(string(gcpDisk.Type)),
+				Storage: types.Int64Value(int64(gcpDisk.Storage)),
+			}
+		default:
+			return nil, errors.ErrUnsupportedCloudProvider
+		}
+
+		// NumOfNodes is optional in the API response; dereference it only when
+		// present. (Previously the struct literal dereferenced it unconditionally,
+		// which would panic on a nil pointer and made this guard unreachable work.)
+		if serviceGroup.NumOfNodes != nil {
+			newServiceGroup.NumOfNodes = types.Int64Value(int64(*serviceGroup.NumOfNodes))
+		}
+
+		if serviceGroup.Services != nil {
+			for _, service := range *serviceGroup.Services {
+				tfService := types.StringValue(string(service))
+				newServiceGroup.Services = append(newServiceGroup.Services, tfService)
+			}
+		}
+		newServiceGroups = append(newServiceGroups, newServiceGroup)
+	}
+	return newServiceGroups, nil
+}
+
+// Validate resolves the organization, project and cluster IDs for the resource.
+// When only the Id field is set (the terraform import flow), it expects a
+// comma-separated string of the form
+// "id=<clusterId>,organization_id=<organizationId>,project_id=<projectId>"
+// and splits it into the individual IDs.
+func (c *Cluster) Validate() (map[string]string, error) {
+	const idDelimiter = ","
+	var found bool
+
+	organizationId := c.OrganizationId.ValueString()
+	projectId := c.ProjectId.ValueString()
+	clusterId := c.Id.ValueString()
+
+	// check if the id is a comma separated string of multiple IDs, usually passed during the terraform import CLI
+	if c.OrganizationId.IsNull() {
+		strs := strings.Split(c.Id.ValueString(), idDelimiter)
+		if len(strs) != 3 {
+			return nil, errors.ErrIdMissing
+		}
+
+		_, clusterId, found = strings.Cut(strs[0], "id=")
+		if !found {
+			return nil, errors.ErrClusterIdMissing
+		}
+
+		_, organizationId, found = strings.Cut(strs[1], "organization_id=")
+		if !found {
+			return nil, errors.ErrOrganizationIdMissing
+		}
+
+		_, projectId, found = strings.Cut(strs[2], "project_id=")
+		if !found {
+			return nil, errors.ErrProjectIdMissing
+		}
+	}
+
+	resourceIDs := c.generateResourceIdMap(organizationId, projectId, clusterId)
+
+	err := c.checkEmpty(resourceIDs)
+	if err != nil {
+		return nil, fmt.Errorf("%s: %w", errors.ErrUnableToImportResource, err)
+	}
+
+	return resourceIDs, nil
+}
+
+// generateResourceIdMap is used to populate a map with selected IDs.
+func (c *Cluster) generateResourceIdMap(organizationId, projectId, clusterId string) map[string]string {
+	return map[string]string{
+		OrganizationId: organizationId,
+		ProjectId:      projectId,
+		ClusterId:      clusterId,
+	}
+}
+
+// checkEmpty is used to verify that a supplied resourceId map has been populated.
+func (c *Cluster) checkEmpty(resourceIdMap map[string]string) error {
+	if resourceIdMap[ClusterId] == "" {
+		return errors.ErrClusterIdCannotBeEmpty
+	}
+
+	if resourceIdMap[ProjectId] == "" {
+		return errors.ErrProjectIdCannotBeEmpty
+	}
+
+	if resourceIdMap[OrganizationId] == "" {
+		return errors.ErrOrganizationIdCannotBeEmpty
+	}
+	return nil
+}
+
+// Clusters defines model for GetClustersResponse.
+type Clusters struct {
+	// OrganizationId is the organizationId of the capella.
+	OrganizationId types.String `tfsdk:"organization_id"`
+
+	// ProjectId is the projectId of the cluster.
+	ProjectId types.String `tfsdk:"project_id"`
+
+	// Data It contains the list of resources.
+	Data []ClusterData `tfsdk:"data"`
+}
+
+// ClusterData defines model for single cluster data.
+type ClusterData struct {
+	Id types.String `tfsdk:"id"`
+
+	// AppServiceId is the ID of the linked app service.
+	AppServiceId types.String `tfsdk:"app_service_id"`
+
+	// Audit contains all audit-related fields.
+	Audit types.Object `tfsdk:"audit"`
+
+	// OrganizationId is the organizationId of the capella tenant.
+	OrganizationId types.String `tfsdk:"organization_id"`
+
+	// ProjectId is the projectId of the capella tenant.
+	ProjectId types.String `tfsdk:"project_id"`
+
+	// Availability defines model for availability.
+	Availability *Availability `tfsdk:"availability"`
+
+	// CloudProvider The cloud provider where the cluster will be hosted.
+	// To learn more, see [Amazon Web Services](https://docs.couchbase.com/cloud/reference/aws.html).
+	CloudProvider *CloudProvider `tfsdk:"cloud_provider"`
+
+	// CouchbaseServer defines model for couchbaseServer.
+	CouchbaseServer *CouchbaseServer `tfsdk:"couchbase_server"`
+
+	// Description of the cluster (up to 1024 characters).
+	Description types.String `tfsdk:"description"`
+
+	// Name of the cluster (up to 256 characters).
+	Name types.String `tfsdk:"name"`
+
+	// ServiceGroups is the couchbase service groups to be run.
At least one service group must contain the data service.
+	ServiceGroups []ServiceGroup `tfsdk:"service_groups"`
+
+	// Support defines model for Support.
+	Support *Support `tfsdk:"support"`
+
+	// State defines the current state of cluster.
+	CurrentState types.String `tfsdk:"current_state"`
+}
+
+// NewClusterData creates a new cluster data object from the cluster API response.
+func NewClusterData(cluster *clusterapi.GetClusterResponse, organizationId, projectId string, auditObject basetypes.ObjectValue) (*ClusterData, error) {
+	newClusterData := ClusterData{
+		Id:             types.StringValue(cluster.Id.String()),
+		OrganizationId: types.StringValue(organizationId),
+		ProjectId:      types.StringValue(projectId),
+		Name:           types.StringValue(cluster.Name),
+		Description:    types.StringValue(cluster.Description),
+		Availability: &Availability{
+			Type: types.StringValue(string(cluster.Availability.Type)),
+		},
+		CloudProvider: &CloudProvider{
+			Cidr:   types.StringValue(cluster.CloudProvider.Cidr),
+			Region: types.StringValue(cluster.CloudProvider.Region),
+			Type:   types.StringValue(string(cluster.CloudProvider.Type)),
+		},
+		Support: &Support{
+			Plan:     types.StringValue(string(cluster.Support.Plan)),
+			Timezone: types.StringValue(string(cluster.Support.Timezone)),
+		},
+		CurrentState: types.StringValue(string(cluster.CurrentState)),
+		Audit:        auditObject,
+	}
+
+	// Version is optional in the API response; only populate it when present.
+	if cluster.CouchbaseServer.Version != nil {
+		version := *cluster.CouchbaseServer.Version
+		newClusterData.CouchbaseServer = &CouchbaseServer{
+			Version: types.StringValue(version),
+		}
+	}
+
+	newServiceGroups, err := morphToTerraformServiceGroups(cluster)
+	if err != nil {
+		return nil, err
+	}
+	newClusterData.ServiceGroups = newServiceGroups
+	return &newClusterData, nil
+}
diff --git a/internal/schema/const.go b/internal/schema/const.go
new file mode 100644
index 00000000..80c89a06
--- /dev/null
+++ b/internal/schema/const.go
@@ -0,0 +1,8 @@
+package schema
+
+const (
+	OrganizationId = "organizationId"
+	ProjectId      = "projectId"
+	ClusterId      = "clusterId"
+	ApiKeyId       = "apiKeyId"
+)
diff --git a/internal/schema/database_credential.go b/internal/schema/database_credential.go
new file mode 100644
index 00000000..76f9c191
--- /dev/null
+++ b/internal/schema/database_credential.go
@@ -0,0 +1,186 @@
+package schema
+
+import (
+	"strings"
+
+	"terraform-provider-capella/internal/errors"
+
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DatabaseCredential maps the schema for the resource - database credential in Capella.
+// A database credential is created on a cluster resource to gain read/write access to the cluster's data.
+// This credential can have a fixed password supplied during creation or the password can be auto-generated.
+//
+// A database credential is simply a user in the couchbase server with some roles attached to it
+// based on the Access field supplied during creation.
+type DatabaseCredential struct {
+	// Id is the id of the created database credential.
+	Id types.String `tfsdk:"id"`
+
+	// Name is the name of the database credential, the name of the database credential should follow this naming criteria:
+	// A database credential name should have at least 2 characters and up to 256 characters and should not contain spaces.
+	Name types.String `tfsdk:"name"`
+
+	// Password is the password that you may want to use to create this database credential.
+	// This password can later be used to authenticate connections to the underlying couchbase server.
+	// The password should contain 8+ characters, at least 1 lower, 1 upper, 1 numerical and 1 special character.
+	Password types.String `tfsdk:"password"`
+
+	// OrganizationId is the ID of the organization to which the Capella cluster belongs.
+	// The database credential will be created for the cluster.
+	OrganizationId types.String `tfsdk:"organization_id"`
+
+	// ProjectId is the ID of the project to which the Capella cluster belongs.
+	// The database credential will be created for the cluster.
+	ProjectId types.String `tfsdk:"project_id"`
+
+	// ClusterId is the ID of the cluster for which the database credential needs to be created.
+	ClusterId types.String `tfsdk:"cluster_id"`
+
+	// Audit contains all audit-related fields. It is of types.Object type to avoid conversion error for a nested field.
+	Audit types.Object `tfsdk:"audit"`
+
+	// Access is a list of access which can be narrowed to the scope level of every bucket in the Capella cluster.
+	// Access can be "read", "write" or both.
+	Access []Access `tfsdk:"access"`
+}
+
+// Access is a list of privileges or permissions which can be narrowed to the scope level of every bucket in the Capella cluster.
+type Access struct {
+	// Privileges is a list of permissions that the database credential will have over the data in the given bucket or scope.
+	// Privileges can be "read", "write" or both.
+	Privileges []types.String `tfsdk:"privileges"`
+	// Resources is the level at which the above privileges are defined.
+	// Ex: Access of read/write privilege can be defined at the bucket level or scope level resource.
+	Resources *Resources `tfsdk:"resources"`
+}
+
+// Resources is the level at which the above privileges are defined.
+// Ex: Access of read/write privilege can be defined at the bucket level or scope level resource.
+type Resources struct {
+	// Buckets contains the details of all buckets with scope and collection level information to which the access applies.
+	Buckets []BucketResource `tfsdk:"buckets"`
+}
+
+// BucketResource contains the details of a single bucket with scope and collection level information.
+// Scopes can be a subset of all scopes inside the bucket, since this is defined only to govern the access.
+type BucketResource struct {
+	// Name is the name of the bucket.
+	Name types.String `tfsdk:"name"`
+	// Scopes is the details of the scopes inside the bucket to which we want to apply access privileges.
+	Scopes []Scope `tfsdk:"scopes"`
+}
+
+// Scope is the details of a single scope inside the bucket, and it contains the collections details too.
+// These collections can be a subset of all collections inside the scope, since this is defined only to govern the access.
+type Scope struct {
+	// Name is the name of the scope.
+	Name types.String `tfsdk:"name"`
+	// Collections lists the collection names inside the scope to which the access applies.
+	Collections []types.String `tfsdk:"collections"`
+}
+
+// OneDatabaseCredential is used to retrieve the new state of a database credential after it is created by Terraform.
+// This struct is separate from the DatabaseCredential struct because of the change in data type of its attributes after retrieval.
+type OneDatabaseCredential struct {
+	// Audit All audit-related fields.
+	Audit CouchbaseAuditData `tfsdk:"audit"`
+
+	// Id A GUID4 identifier of the created database credential.
+	Id types.String `tfsdk:"id"`
+
+	// Name is the name of the database credential, the name of the database credential should follow this naming criteria:
+	// A database credential name should have at least 2 characters and up to 256 characters and should not contain spaces.
+	Name types.String `tfsdk:"name"`
+
+	// Password is the password that you may want to use to create this database credential.
+	// This password can later be used to authenticate connections to the underlying couchbase server.
+	// The password should contain 8+ characters, at least 1 lower, 1 upper, 1 numerical and 1 special character.
+	Password types.String `tfsdk:"password"`
+
+	// OrganizationId is the ID of the organization to which the Capella cluster belongs.
+	// The database credential will be created for the cluster.
+	OrganizationId types.String `tfsdk:"organization_id"`
+
+	// ProjectId is the ID of the project to which the Capella cluster belongs.
+	// The database credential will be created for the cluster.
+	ProjectId types.String `tfsdk:"project_id"`
+
+	// ClusterId is the ID of the cluster for which the database credential needs to be created.
+	ClusterId types.String `tfsdk:"cluster_id"`
+
+	// Access is a list of access which can be narrowed to the scope level of every bucket in the Capella cluster.
+	// Access can be "read", "write" or both.
+	Access []Access `tfsdk:"access"`
+}
+
+// Validate will split the IDs by a delimiter i.e. comma , in case a terraform import CLI is invoked.
+// The format of the terraform import CLI would include the IDs as follows -
+// `terraform import capella_database_credential.new_database_credential id=<id>,cluster_id=<clusterId>,project_id=<projectId>,organization_id=<organizationId>`
+func (c DatabaseCredential) Validate() (databaseCredentialId, clusterId, projectId, organizationId string, err error) {
+	const (
+		idDelimiter             = ","
+		organizationIdSep       = "organization_id="
+		projectIdSep            = "project_id="
+		clusterIdSep            = "cluster_id="
+		databaseCredentialIdSep = "id="
+	)
+
+	organizationId = c.OrganizationId.ValueString()
+	projectId = c.ProjectId.ValueString()
+	clusterId = c.ClusterId.ValueString()
+	databaseCredentialId = c.Id.ValueString()
+	var found bool
+
+	// check if the id is a comma separated string of multiple IDs, usually passed during the terraform import CLI
+	if c.OrganizationId.IsNull() {
+		strs := strings.Split(c.Id.ValueString(), idDelimiter)
+		if len(strs) != 4 {
+			err = errors.ErrIdMissing
+			return
+		}
+		_, databaseCredentialId, found = strings.Cut(strs[0], databaseCredentialIdSep)
+		if !found {
+			err = errors.ErrDatabaseCredentialIdMissing
+			return
+		}
+
+		_, clusterId, found = strings.Cut(strs[1], clusterIdSep)
+		if !found {
+			err = errors.ErrClusterIdMissing
+			return
+		}
+
+		_, projectId, found = strings.Cut(strs[2], projectIdSep)
+		if !found {
+			err = errors.ErrProjectIdMissing
+			return
+		}
+
+		_, organizationId, found = strings.Cut(strs[3], organizationIdSep)
+		if !found {
+			err = errors.ErrOrganizationIdMissing
+			return
+		}
+	}
+
+	if databaseCredentialId == "" {
+		err = errors.ErrDatabaseCredentialIdCannotBeEmpty
+		return
+	}
+
+	if clusterId == "" {
+		err = 
errors.ErrClusterIdCannotBeEmpty
+		return
+	}
+
+	if projectId == "" {
+		err = errors.ErrProjectIdCannotBeEmpty
+		return
+	}
+
+	if organizationId == "" {
+		err = errors.ErrOrganizationIdCannotBeEmpty
+		return
+	}
+
+	return databaseCredentialId, clusterId, projectId, organizationId, nil
+}
diff --git a/internal/schema/database_credential_test.go b/internal/schema/database_credential_test.go
new file mode 100644
index 00000000..f77d7da6
--- /dev/null
+++ b/internal/schema/database_credential_test.go
@@ -0,0 +1,97 @@
+package schema
+
+import (
+	"testing"
+
+	"terraform-provider-capella/internal/errors"
+
+	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+	"github.com/stretchr/testify/assert"
+)
+
+// TestDatabaseCredentialSchemaValidate covers both the terraform apply flow
+// (all IDs set individually) and the terraform import flow (comma-separated
+// IDs packed into Id), including malformed import strings.
+func TestDatabaseCredentialSchemaValidate(t *testing.T) {
+	tests := []struct {
+		name                         string
+		input                        DatabaseCredential
+		expectedProjectId            string
+		expectedOrganizationId       string
+		expectedClusterId            string
+		expectedDatabaseCredentialId string
+		expectedErr                  error
+	}{
+		{
+			name: "[POSITIVE] project ID, organization ID, cluster ID, database credential ID are passed via terraform apply",
+			input: DatabaseCredential{
+				Id:             basetypes.NewStringValue("100"),
+				ClusterId:      basetypes.NewStringValue("200"),
+				ProjectId:      basetypes.NewStringValue("300"),
+				OrganizationId: basetypes.NewStringValue("400"),
+			},
+			expectedDatabaseCredentialId: "100",
+			expectedClusterId:            "200",
+			expectedProjectId:            "300",
+			expectedOrganizationId:       "400",
+		},
+		{
+			name: "[POSITIVE] IDs are passed via terraform import",
+			input: DatabaseCredential{
+				Id: basetypes.NewStringValue("id=100,cluster_id=200,project_id=300,organization_id=400"),
+			},
+			expectedDatabaseCredentialId: "100",
+			expectedClusterId:            "200",
+			expectedProjectId:            "300",
+			expectedOrganizationId:       "400",
+		},
+		{
+			name: "[NEGATIVE] IDs follow the right syntax but order is incorrect in terraform import",
+			input: DatabaseCredential{
+				Id: basetypes.NewStringValue("id=100,organization_id=200,project_id=300,cluster_id=400"),
+			},
+			expectedErr: errors.ErrClusterIdMissing,
+		},
+		{
+			name: "[NEGATIVE] only database credential ID is passed via terraform apply",
+			input: DatabaseCredential{
+				Id: basetypes.NewStringValue("100"),
+			},
+			expectedErr: errors.ErrIdMissing,
+		},
+		{
+			name: "[NEGATIVE] only organization ID is passed via terraform apply",
+			input: DatabaseCredential{
+				OrganizationId: basetypes.NewStringValue("100"),
+			},
+			expectedErr: errors.ErrDatabaseCredentialIdCannotBeEmpty,
+		},
+		{
+			name: "[NEGATIVE] IDs are incorrectly passed via terraform import",
+			input: DatabaseCredential{
+				Id: basetypes.NewStringValue("100&organization_id=200,projectId=123&cluster_id=900"),
+			},
+			expectedErr: errors.ErrIdMissing,
+		},
+		{
+			name: "[NEGATIVE] IDs are incorrectly passed via terraform import",
+			input: DatabaseCredential{
+				Id: basetypes.NewStringValue("id=100,orgId=200,clusterId=300,project_id=900"),
+			},
+			expectedErr: errors.ErrClusterIdMissing,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			dbId, clusterId, projectId, organizationId, err := tt.input.Validate()
+
+			if tt.expectedErr != nil {
+				assert.Equal(t, tt.expectedErr, err)
+				return
+			}
+
+			assert.Equal(t, tt.expectedDatabaseCredentialId, dbId)
+			assert.Equal(t, tt.expectedClusterId, clusterId)
+			assert.Equal(t, tt.expectedProjectId, projectId)
+			assert.Equal(t, tt.expectedOrganizationId, organizationId)
+		})
+	}
+}
diff --git a/internal/schema/organization.go b/internal/schema/organization.go
new file mode 100644
index 00000000..72fb463e
--- /dev/null
+++ b/internal/schema/organization.go
@@ -0,0 +1,47 @@
+package schema
+
+import (
+	"terraform-provider-capella/internal/api/organization"
+
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// Organization maps the organization resource schema data.
+type Organization struct {
+	// Audit represents all audit-related fields.
+	Audit types.Object `tfsdk:"audit"`
+
+	// OrganizationId is the organizationId of the capella tenant.
+	OrganizationId types.String `tfsdk:"organization_id"`
+
+	// Name represents the name of the organization.
+	Name types.String `tfsdk:"name"`
+
+	// Description is a short description of the organization.
+	Description types.String `tfsdk:"description"`
+
+	// Preferences stores preferences for the tenant.
+	Preferences types.Object `tfsdk:"preferences"`
+}
+
+// Preferences defines the preferences stored for an organization (tenant).
+type Preferences struct {
+	// SessionDuration: Maximum allowed time in seconds inside the tenant for a user.
+	SessionDuration types.Int64 `tfsdk:"session_duration"`
+}
+
+// AttributeTypes returns the attribute types used to build a types.Object from Preferences.
+func (p Preferences) AttributeTypes() map[string]attr.Type {
+	return map[string]attr.Type{
+		"session_duration": types.Int64Type,
+	}
+}
+
+// NewPreferences creates a new preferences object.
+func NewPreferences(preference organization.Preferences) Preferences {
+	// SessionDuration is optional in the API response; fall back to 0 when absent.
+	var sessionDuration int64
+	if preference.SessionDuration != nil {
+		sessionDuration = int64(*preference.SessionDuration)
+	}
+	return Preferences{
+		SessionDuration: types.Int64Value(sessionDuration),
+	}
+}
diff --git a/internal/schema/project.go b/internal/schema/project.go
new file mode 100644
index 00000000..25ea7bbf
--- /dev/null
+++ b/internal/schema/project.go
@@ -0,0 +1,95 @@
+package schema
+
+import (
+	"strings"
+	"terraform-provider-capella/internal/errors"
+
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// Project maps project resource schema data.
+type Project struct {
+	// Description The description of a particular project.
+	Description types.String `tfsdk:"description"`
+
+	// Id A GUID4 identifier of the project.
+	Id types.String `tfsdk:"id"`
+
+	// OrganizationId The organizationId of the capella.
+	OrganizationId types.String `tfsdk:"organization_id"`
+
+	// Name The name of the project.
+	Name types.String `tfsdk:"name"`
+
+	Etag types.String `tfsdk:"etag"`
+
+	IfMatch types.String `tfsdk:"if_match"`
+
+	// Audit All audit-related fields. It is of types.Object type to avoid conversion error for a nested field.
+	Audit types.Object `tfsdk:"audit"`
+}
+
+// Validate resolves the project and organization IDs for the resource.
+// When only the Id field is set (the terraform import flow), it expects a
+// comma-separated string of the form "id=<projectId>,organization_id=<organizationId>".
+func (p Project) Validate() (projectId string, organizationId string, err error) {
+	const idDelimiter = ","
+	organizationId = p.OrganizationId.ValueString()
+	projectId = p.Id.ValueString()
+	var found bool
+
+	// check if the id is a comma separated string of multiple IDs, usually passed during the terraform import CLI
+	if p.OrganizationId.IsNull() {
+		strs := strings.Split(p.Id.ValueString(), idDelimiter)
+		if len(strs) != 2 {
+			return "", "", errors.ErrIdMissing
+		}
+		_, projectId, found = strings.Cut(strs[0], "id=")
+		if !found {
+			return "", "", errors.ErrProjectIdMissing
+		}
+
+		_, organizationId, found = strings.Cut(strs[1], "organization_id=")
+		if !found {
+			return "", "", errors.ErrOrganizationIdMissing
+		}
+	}
+
+	if projectId == "" {
+		return "", "", errors.ErrProjectIdCannotBeEmpty
+	}
+
+	if organizationId == "" {
+		return "", "", errors.ErrOrganizationIdCannotBeEmpty
+	}
+
+	return projectId, organizationId, nil
+}
+
+// Projects defines model for GetProjectsResponse.
+type Projects struct {
+	// OrganizationId The organizationId of the capella.
+	OrganizationId types.String `tfsdk:"organization_id"`
+
+	// Data It contains the list of resources.
+	Data []OneProject `tfsdk:"data"`
+}
+
+// OneProject maps project resource schema data; there is a separate response object to avoid conversion error for nested fields.
+type OneProject struct {
+	// Audit All audit-related fields.
+	Audit CouchbaseAuditData `tfsdk:"audit"`
+
+	// Description The description of a particular project.
+	Description types.String `tfsdk:"description"`
+
+	// Id A GUID4 identifier of the project.
+	Id types.String `tfsdk:"id"`
+
+	// OrganizationId The organizationId of the capella.
+	OrganizationId types.String `tfsdk:"organization_id"`
+
+	// Name The name of the project.
+	Name types.String `tfsdk:"name"`
+
+	Etag types.String `tfsdk:"etag"`
+
+	IfMatch types.String `tfsdk:"if_match"`
+}
diff --git a/internal/schema/project_test.go b/internal/schema/project_test.go
new file mode 100644
index 00000000..9cbaebea
--- /dev/null
+++ b/internal/schema/project_test.go
@@ -0,0 +1,88 @@
+package schema
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"terraform-provider-capella/internal/errors"
+
+	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+)
+
+// TestProjectSchemaValidate covers both the terraform apply flow (IDs set
+// individually) and the terraform import flow (comma-separated IDs packed
+// into Id), including malformed import strings.
+func TestProjectSchemaValidate(t *testing.T) {
+	tests := []struct {
+		name                   string
+		input                  Project
+		expectedProjectId      string
+		expectedOrganizationId string
+		expectedErr            error
+	}{
+		{
+			name: "[POSITIVE] project ID and organization ID are passed via terraform apply",
+			input: Project{
+				Id:             basetypes.NewStringValue("100"),
+				OrganizationId: basetypes.NewStringValue("200"),
+			},
+			expectedProjectId:      "100",
+			expectedOrganizationId: "200",
+		},
+		{
+			name: "[POSITIVE] project ID and organization ID are passed via terraform import",
+			input: Project{
+				Id: basetypes.NewStringValue("id=100,organization_id=200"),
+			},
+			expectedProjectId:      "100",
+			expectedOrganizationId: "200",
+		},
+		{
+			name: "[NEGATIVE] only project ID is passed via terraform apply",
+			input: Project{
+				Id: basetypes.NewStringValue("100"),
+			},
+			expectedErr: errors.ErrIdMissing,
+		},
+		{
+			name: "[NEGATIVE] only organization ID is passed via terraform apply",
+			input: Project{
+				OrganizationId: basetypes.NewStringValue("100"),
+			},
+			expectedErr: errors.ErrProjectIdCannotBeEmpty,
+		},
+		{
+			name: "[NEGATIVE] project ID and organization ID are incorrectly passed via terraform import",
+			input: Project{
+				Id: basetypes.NewStringValue("100&organization_id=200"),
+			},
+			expectedErr: errors.ErrIdMissing,
+		},
+		{
+			name: "[NEGATIVE] project ID and organization ID are incorrectly passed via terraform import",
+			input: Project{
+				Id: basetypes.NewStringValue("id=100,orgId=200"),
+			},
+			expectedErr: errors.ErrOrganizationIdMissing,
+		},
+		{
+			name: "[NEGATIVE] project ID and organization ID are incorrectly passed via terraform import",
+			input: Project{
+				Id: basetypes.NewStringValue("ProjectID=100,organization_id=200"),
+			},
+			expectedErr: errors.ErrProjectIdMissing,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			projectId, organizationId, err := tt.input.Validate()
+
+			if tt.expectedErr != nil {
+				assert.Equal(t, tt.expectedErr, err)
+				return
+			}
+
+			assert.Equal(t, tt.expectedProjectId, projectId)
+			assert.Equal(t, tt.expectedOrganizationId, organizationId)
+		})
+	}
+}
diff --git a/internal/schema/provider.go b/internal/schema/provider.go
new file mode 100644
index 00000000..062ffbba
--- /dev/null
+++ b/internal/schema/provider.go
@@ -0,0 +1,20 @@
+package schema
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"terraform-provider-capella/internal/api"
+)
+
+// Config maps provider schema data to a Go type.
+type Config struct {
+	// Host is the base URL of the Capella cloud API.
+	Host types.String `tfsdk:"host"`
+	// AuthenticationToken is the V4 API key used to authenticate against the API.
+	AuthenticationToken types.String `tfsdk:"authentication_token"`
+}
+
+// Data is provider-defined data, clients, etc. that is passed
+// to data sources or resources in the provider that implement the Configure method.
+type Data struct {
+	HostURL string
+	Token   string
+	Client  *api.Client
+}
diff --git a/internal/schema/user.go b/internal/schema/user.go
new file mode 100644
index 00000000..566c8fde
--- /dev/null
+++ b/internal/schema/user.go
@@ -0,0 +1,246 @@
+package schema
+
+import (
+	"fmt"
+	"strings"
+	"terraform-provider-capella/internal/api"
+	"terraform-provider-capella/internal/errors"
+
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+)
+
+// User maps User resource schema data.
+type User struct {
+	// Id is a GUID4 identifier of the user.
+	Id types.String `tfsdk:"id"`
+
+	// Name represents the name of the user.
+	Name types.String `tfsdk:"name"`
+
+	// Email represents the email of the user.
+	Email types.String `tfsdk:"email"`
+
+	// Status depicts whether the user is verified or not.
+	Status types.String `tfsdk:"status"`
+
+	// Inactive depicts whether the user has accepted the invite for the organization.
+	Inactive types.Bool `tfsdk:"inactive"`
+
+	// OrganizationId is the organizationId of the capella tenant.
+	OrganizationId types.String `tfsdk:"organization_id"`
+
+	// OrganizationRoles is an array of strings representing the roles granted to the user.
+	OrganizationRoles []types.String `tfsdk:"organization_roles"`
+
+	// LastLogin is the time(UTC) at which user last logged in.
+	LastLogin types.String `tfsdk:"last_login"`
+
+	// Region is the region of the user.
+	Region types.String `tfsdk:"region"`
+
+	// TimeZone is the time zone of the user.
+	TimeZone types.String `tfsdk:"time_zone"`
+
+	// EnableNotifications represents whether email alerts for databases in projects
+	// will be received.
+	EnableNotifications types.Bool `tfsdk:"enable_notifications"`
+
+	// ExpiresAt is the time at which user expires.
+	ExpiresAt types.String `tfsdk:"expires_at"`
+
+	// Resources is an array of objects representing the resources the user has access to.
+	Resources []Resource `tfsdk:"resources"`
+
+	// Audit represents all audit-related fields. It is of types.Object type to avoid conversion error for a nested field.
+	Audit types.Object `tfsdk:"audit"`
+}
+
+// NewUser creates a new instance of a User object.
+func NewUser(
+	Id types.String,
+	name types.String,
+	email types.String,
+	status types.String,
+	inactive types.Bool,
+	organizationId types.String,
+	organizationRoles []types.String,
+	lastLogin types.String,
+	region types.String,
+	timeZone types.String,
+	enableNotifications types.Bool,
+	expiresAt types.String,
+	resources []Resource,
+	audit basetypes.ObjectValue,
+) *User {
+	newUser := User{
+		Id:                  Id,
+		Name:                name,
+		Email:               email,
+		Status:              status,
+		Inactive:            inactive,
+		OrganizationId:      organizationId,
+		OrganizationRoles:   organizationRoles,
+		LastLogin:           lastLogin,
+		Region:              region,
+		TimeZone:            timeZone,
+		EnableNotifications: enableNotifications,
+		ExpiresAt:           expiresAt,
+		Resources:           resources,
+		Audit:               audit,
+	}
+	return &newUser
+}
+
+// Resource describes a single resource the user has access to, along with the roles granted on it.
+type Resource struct {
+	// Id is a GUID4 identifier of the resource.
+	Id types.String `tfsdk:"id"`
+
+	// Type is the type of the resource.
+	Type types.String `tfsdk:"type"`
+
+	// Roles is an array of strings representing a users project roles.
+	Roles []types.String `tfsdk:"roles"`
+}
+
+// Users defines the model for GetUsers.
+type Users struct {
+	// OrganizationId is the organizationId of the capella.
+	OrganizationId types.String `tfsdk:"organization_id"`
+
+	// Data contains the list of resources.
+ Data []User `tfsdk:"data"` +} + +// Validate is used to verify that IDs have been properly imported +// TODO (AV-53457): add unit testing +func (u *User) Validate() (map[string]string, error) { + const idDelimiter = "," + var found bool + + organizationId := u.OrganizationId.ValueString() + userId := u.Id.ValueString() + + // check if the id is a comma separated string of multiple IDs, usually passed during the terraform import CLI + if u.OrganizationId.IsNull() { + strs := strings.Split(u.Id.ValueString(), idDelimiter) + if len(strs) != 2 { + return nil, errors.ErrIdMissing + } + + _, userId, found = strings.Cut(strs[0], "id=") + if !found { + return nil, errors.ErrAllowListIdMissing + } + + _, organizationId, found = strings.Cut(strs[1], "organization_id=") + if !found { + return nil, errors.ErrOrganizationIdMissing + } + } + + resourceIDs := u.generateResourceIdMap(organizationId, userId) + + err := u.checkEmpty(resourceIDs) + if err != nil { + return nil, fmt.Errorf("%s: %w", errors.ErrUnableToImportResource, err) + } + + return resourceIDs, nil +} + +// generateResourceIdmap is used to populate a map with selected IDs +// TODO (AV-53457): add unit testing +func (u *User) generateResourceIdMap(organizationId, userId string) map[string]string { + return map[string]string{ + "organizationId": organizationId, + "userId": userId, + } +} + +// checkEmpty is used to verify that a supplied resourceId map has been populated +// TODO (AV-53457): add unit testing +func (u *User) checkEmpty(resourceIdMap map[string]string) error { + if resourceIdMap["userId"] == "" { + return errors.ErrAllowListIdCannotBeEmpty + } + + if resourceIdMap["organizationId"] == "" { + return errors.ErrOrganizationIdCannotBeEmpty + } + return nil +} + +// MorphOrganizationRoles is used to convert nested organizationRoles from +// strings to terraform type.String. 
+// TODO (AV-53457): add unit testing +func MorphOrganizationRoles(organizationRoles []string) []basetypes.StringValue { + var morphedRoles []basetypes.StringValue + for _, role := range organizationRoles { + morphedRoles = append(morphedRoles, types.StringValue(role)) + } + return morphedRoles +} + +// ConvertOrganizationRoles is used to convert all roles +// in an array of basetypes.StringValue to strings. +// TODO (AV-53457): add unit testing +func ConvertOrganizationRoles(organizationRoles []basetypes.StringValue) []string { + var convertedRoles []string + for _, role := range organizationRoles { + convertedRoles = append(convertedRoles, role.ValueString()) + } + return convertedRoles +} + +// ConvertResource is used to convert a resource object containing nested fields +// of type basetypes.StringValue to a resource object containing nested fields of type string. +// TODO (AV-53457): add unit testing +func ConvertResources(resources []Resource) []api.Resource { + var convertedResources []api.Resource + for _, resource := range resources { + var convertedResource api.Resource + convertedResource.Id = resource.Id.ValueString() + + resourceType := resource.Type.ValueString() + convertedResource.Type = &resourceType + + // Iterate through roles belonging to the user and convert to string + var convertedRoles []string + for _, role := range resource.Roles { + convertedRoles = append(convertedRoles, role.ValueString()) + } + convertedResource.Roles = convertedRoles + + convertedResources = append(convertedResources, convertedResource) + } + return convertedResources +} + +// MorphResources is used to covert nested resources from strings +// to terraform types.String +// TODO (AV-53457): add unit testing +func MorphResources(resources []api.Resource) []Resource { + var morphedResources []Resource + for _, resource := range resources { + var morphedResource Resource + + morphedResource.Id = types.StringValue(resource.Id) + + // Check for optional field + if 
resource.Type != nil { + resourceType := types.StringValue(*resource.Type) + morphedResource.Type = resourceType + } + + var roles []basetypes.StringValue + for _, role := range resource.Roles { + roles = append(roles, types.StringValue(role)) + } + + morphedResource.Roles = roles + morphedResources = append(morphedResources, morphedResource) + + } + return morphedResources +} diff --git a/main.go b/main.go new file mode 100644 index 00000000..c8581f9d --- /dev/null +++ b/main.go @@ -0,0 +1,53 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "context" + "flag" + "log" + + "terraform-provider-capella/internal/provider" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" +) + +// Run the docs generation tool, check its repository for more information on how it works and how docs +// can be customized. +//go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs + +var ( + // these will be set by the goreleaser configuration + // to appropriate values for the compiled binary. + version string = "local" + + // goreleaser can pass other information to the main package, such as the specific commit + // https://goreleaser.com/cookbooks/using-main.version/ +) + +func main() { + var debug bool + + flag.BoolVar(&debug, "debug", false, "set to true to run the provider with support for debuggers like delve") + flag.Parse() + + opts := providerserver.ServeOpts{ + // NOTE: This is not a typical Terraform Registry provider address, + // such as registry.terraform.io/hashicorp/hashicups. This specific + // provider address is used in these tutorials in conjunction with a + // specific Terraform CLI configuration for manual development testing + // of this provider. 
+ Address: "hashicorp.com/couchbasecloud/capella", + Debug: debug, + } + + err := providerserver.Serve( + context.Background(), + provider.New(version), + opts) + + if err != nil { + log.Fatal(err.Error()) + } +} diff --git a/terraform-registry-manifest.json b/terraform-registry-manifest.json new file mode 100644 index 00000000..fec2a569 --- /dev/null +++ b/terraform-registry-manifest.json @@ -0,0 +1,6 @@ +{ + "version": 1, + "metadata": { + "protocol_versions": ["6.0"] + } +}