From f43ec9dc9e7969aefee30aa69d0ffc1c0f5a9279 Mon Sep 17 00:00:00 2001 From: Nicolas Dupeux Date: Thu, 19 Jan 2023 16:22:14 +0100 Subject: [PATCH] Add resource and data for ovh_dbaas_logs_cluster --- ovh/data_dbaas_logs_cluster.go | 141 +++++++++ ovh/data_dbaas_logs_cluster_test.go | 42 +++ ovh/provider.go | 2 + ovh/provider_test.go | 2 +- ovh/resource_dbaas_logs_cluster.go | 277 ++++++++++++++++++ ovh/resource_dbaas_logs_cluster_test.go | 74 +++++ ovh/types_dbaas_logs.go | 28 +- .../docs/d/dbaas_logs_cluster.html.markdown | 37 +++ .../docs/r/dbaas_logs_cluster.html.markdown | 56 ++++ 9 files changed, 657 insertions(+), 2 deletions(-) create mode 100644 ovh/data_dbaas_logs_cluster.go create mode 100644 ovh/data_dbaas_logs_cluster_test.go create mode 100644 ovh/resource_dbaas_logs_cluster.go create mode 100644 ovh/resource_dbaas_logs_cluster_test.go create mode 100644 website/docs/d/dbaas_logs_cluster.html.markdown create mode 100644 website/docs/r/dbaas_logs_cluster.html.markdown diff --git a/ovh/data_dbaas_logs_cluster.go b/ovh/data_dbaas_logs_cluster.go new file mode 100644 index 000000000..babc22ecc --- /dev/null +++ b/ovh/data_dbaas_logs_cluster.go @@ -0,0 +1,141 @@ +package ovh + +import ( + "fmt" + "log" + "net/url" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceDbaasLogsCluster() *schema.Resource { + return &schema.Resource{ + Read: func(d *schema.ResourceData, meta interface{}) error { + return dataSourceDbaasLogsClusterRead(d, meta) + }, + Schema: map[string]*schema.Schema{ + "service_name": { + Type: schema.TypeString, + Description: "The service name", + Required: true, + }, + // Computed + "cluster_type": { + Type: schema.TypeString, + Description: "Cluster type", + Computed: true, + }, + "dedicated_input_pem": { + Type: schema.TypeString, + Description: "PEM for dedicated inputs", + Computed: true, + Sensitive: true, + }, + "archive_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: 
schema.TypeString, + }, + Description: "Allowed networks for ARCHIVE flow type", + Computed: true, + }, + "direct_input_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Allowed networks for DIRECT_INPUT flow type", + Computed: true, + }, + "direct_input_pem": { + Type: schema.TypeString, + Description: "PEM for direct inputs", + Computed: true, + Sensitive: true, + }, + "hostname": { + Type: schema.TypeString, + Description: "hostname", + Computed: true, + }, + "is_default": { + Type: schema.TypeBool, + Description: "All content generated by given service will be placed on this cluster", + Computed: true, + }, + "is_unlocked": { + Type: schema.TypeBool, + Description: "Allow given service to perform advanced operations on cluster", + Computed: true, + }, + "query_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Allowed networks for QUERY flow type", + Computed: true, + }, + "region": { + Type: schema.TypeString, + Description: "Data center localization", + Computed: true, + }, + }, + } +} + +func dbaasGetClusterID(config *Config, serviceName string) (string, error) { + res := []string{} + + endpoint := fmt.Sprintf( + "/dbaas/logs/%s/cluster", + url.PathEscape(serviceName), + ) + + if err := config.OVHClient.Get(endpoint, &res); err != nil { + return "", fmt.Errorf("Error calling GET %s:\n\t %q", endpoint, err) + } + + return res[0], nil +} + +func dataSourceDbaasLogsClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + serviceName := d.Get("service_name").(string) + + log.Printf("[DEBUG] Will read dbaas logs cluster %s", serviceName) + + cluster_id, err := dbaasGetClusterID(config, serviceName) + + if err != nil { + return fmt.Errorf("Error fetching info for %s:\n\t %q", serviceName, err) + } + + d.SetId(cluster_id) + + endpoint := fmt.Sprintf( + "/dbaas/logs/%s/cluster/%s", + 
url.PathEscape(serviceName), + url.PathEscape(cluster_id), + ) + + res := map[string]interface{}{} + if err := config.OVHClient.Get(endpoint, &res); err != nil { + return fmt.Errorf("Error calling GET %s:\n\t %q", endpoint, err) + } + + d.Set("archive_allowed_networks", res["archiveAllowedNetworks"]) + d.Set("cluster_type", res["clusterType"]) + d.Set("dedicated_input_pem", res["dedicatedInputPEM"]) + d.Set("direct_input_allowed_networks", res["directInputAllowedNetworks"]) + d.Set("direct_input_pem", res["directInputPEM"]) + d.Set("hostname", res["hostname"]) + d.Set("is_default", res["isDefault"]) + d.Set("is_unlocked", res["isUnlocked"]) + d.Set("query_allowed_networks", res["queryAllowedNetworks"]) + d.Set("region", res["region"]) + + return nil +} diff --git a/ovh/data_dbaas_logs_cluster_test.go b/ovh/data_dbaas_logs_cluster_test.go new file mode 100644 index 000000000..84ac2276d --- /dev/null +++ b/ovh/data_dbaas_logs_cluster_test.go @@ -0,0 +1,42 @@ +package ovh + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const testAccDataSourceDbaasLogsCluster = ` +data "ovh_dbaas_logs_cluster" "ldp" { + service_name = "%s" +} +` + +func TestAccDataSourceDbaasLogsCluster(t *testing.T) { + serviceName := os.Getenv("OVH_DBAAS_LOGS_SERVICE_TEST") + + config := fmt.Sprintf( + testAccDataSourceDbaasLogsCluster, + serviceName, + ) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheckDbaasLogs(t) }, + + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "data.ovh_dbaas_logs_cluster.ldp", + "service_name", + serviceName, + ), + ), + }, + }, + }) +} diff --git a/ovh/provider.go b/ovh/provider.go index e49f41e05..bdc52e358 100644 --- a/ovh/provider.go +++ b/ovh/provider.go @@ -91,6 +91,7 @@ func Provider() *schema.Provider { "ovh_cloud_project_user_s3_credentials": 
dataCloudProjectUserS3Credentials(), "ovh_cloud_project_user_s3_policy": dataCloudProjectUserS3Policy(), "ovh_cloud_project_users": datasourceCloudProjectUsers(), + "ovh_dbaas_logs_cluster": dataSourceDbaasLogsCluster(), "ovh_dbaas_logs_input_engine": dataSourceDbaasLogsInputEngine(), "ovh_dbaas_logs_output_graylog_stream": dataSourceDbaasLogsOutputGraylogStream(), "ovh_dedicated_ceph": dataSourceDedicatedCeph(), @@ -160,6 +161,7 @@ func Provider() *schema.Provider { "ovh_cloud_project_user_s3_credential": resourceCloudProjectUserS3Credential(), "ovh_cloud_project_user_s3_policy": resourceCloudProjectUserS3Policy(), "ovh_cloud_project_workflow_backup": resourceCloudProjectWorkflowBackup(), + "ovh_dbaas_logs_cluster": resourceDbaasLogsCluster(), "ovh_dbaas_logs_input": resourceDbaasLogsInput(), "ovh_dbaas_logs_output_graylog_stream": resourceDbaasLogsOutputGraylogStream(), "ovh_dedicated_ceph_acl": resourceDedicatedCephACL(), diff --git a/ovh/provider_test.go b/ovh/provider_test.go index 9745281e7..8776233e5 100644 --- a/ovh/provider_test.go +++ b/ovh/provider_test.go @@ -161,7 +161,7 @@ func testAccPreCheckHostingPrivateDatabaseWhitelist(t *testing.T) { checkEnvOrSkip(t, "OVH_HOSTING_PRIVATEDATABASE_WHITELIST_SFTP_TEST") } -// Checks that the environment variables needed for the /cloud acceptance tests +// Checks that the environment variables needed for the /dbaas acceptance tests // are set. 
func testAccPreCheckDbaasLogs(t *testing.T) { testAccPreCheckCredentials(t) diff --git a/ovh/resource_dbaas_logs_cluster.go b/ovh/resource_dbaas_logs_cluster.go new file mode 100644 index 000000000..0df9e946b --- /dev/null +++ b/ovh/resource_dbaas_logs_cluster.go @@ -0,0 +1,277 @@ +package ovh + +import ( + "fmt" + "log" + "net/url" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceDbaasLogsCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceDbaasLogsClusterCreate, + Update: resourceDbaasLogsClusterUpdate, + Read: resourceDbaasLogsClusterRead, + Delete: resourceDbaasLogsClusterDelete, + Importer: &schema.ResourceImporter{ + State: resourceDbaasLogsClusterImportState, + }, + + Schema: resourceDbaasLogsClusterSchema(), + } +} + +func resourceDbaasLogsClusterSchema() map[string]*schema.Schema { + schema := map[string]*schema.Schema{ + "service_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "archive_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Allowed networks for ARCHIVE flow type", + Optional: true, + }, + "direct_input_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Allowed networks for DIRECT_INPUT flow type", + Optional: true, + }, + "query_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Allowed networks for QUERY flow type", + Optional: true, + }, + + // Computed + "cluster_type": { + Type: schema.TypeString, + Description: "Cluster type", + Computed: true, + }, + "dedicated_input_pem": { + Type: schema.TypeString, + Description: "PEM for dedicated inputs", + Computed: true, + Sensitive: true, + }, + "direct_input_pem": { + Type: schema.TypeString, + Description: "PEM for direct inputs", + Computed: true, + Sensitive: true, + }, + "hostname": { + Type: 
schema.TypeString, + Description: "hostname", + Computed: true, + }, + "is_default": { + Type: schema.TypeBool, + Description: "All content generated by given service will be placed on this cluster", + Computed: true, + }, + "is_unlocked": { + Type: schema.TypeBool, + Description: "Allow given service to perform advanced operations on cluster", + Computed: true, + }, + "region": { + Type: schema.TypeString, + Description: "Data center localization", + Computed: true, + }, + // Store ACL before the cluster was managed by terraform + "initial_archive_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Initial allowed networks for ARCHIVE flow type", + Computed: true, + Sensitive: true, + }, + "initial_direct_input_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Initial allowed networks for DIRECT_INPUT flow type", + Computed: true, + Sensitive: true, + }, + "initial_query_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Initial allowed networks for QUERY flow type", + Computed: true, + Sensitive: true, + }, + } + + return schema +} + +func resourceDbaasLogsClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + serviceName := d.Get("service_name").(string) + cluster_id, err := dbaasGetClusterID(config, serviceName) + if err != nil { + return fmt.Errorf("Error fetching info for %s:\n\t %q", serviceName, err) + } + d.SetId(cluster_id) + + // Fetch current ACL to restore them as-is when the resource is deleted + endpoint := fmt.Sprintf( + "/dbaas/logs/%s/cluster/%s", + url.PathEscape(serviceName), + url.PathEscape(cluster_id), + ) + + res := map[string]interface{}{} + if err := config.OVHClient.Get(endpoint, &res); err != nil { + return fmt.Errorf("Error calling GET %s:\n\t %q", endpoint, err) + } + + 
d.Set("initial_archive_allowed_networks", res["archiveAllowedNetworks"]) + d.Set("initial_direct_input_allowed_networks", res["directInputAllowedNetworks"]) + d.Set("initial_query_allowed_networks", res["queryAllowedNetworks"]) + + return resourceDbaasLogsClusterUpdate(d, meta) +} + +func resourceDbaasLogsClusterDelete(d *schema.ResourceData, meta interface{}) error { + // Restore ACL as they were before we managed the resource using terraform + config := meta.(*Config) + + serviceName := d.Get("service_name").(string) + cluster_id := d.Id() + + endpoint := fmt.Sprintf( + "/dbaas/logs/%s/cluster/%s", + url.PathEscape(serviceName), + url.PathEscape(cluster_id), + ) + + opts := &DbaasLogsOpts{} + ArchiveAllowedNetworks := d.Get("initial_archive_allowed_networks").(*schema.Set).List() + opts.ArchiveAllowedNetworks = make([]string, len(ArchiveAllowedNetworks)) + for i, ipBlock := range ArchiveAllowedNetworks { + opts.ArchiveAllowedNetworks[i] = ipBlock.(string) + } + DirectInputAllowedNetworks := d.Get("initial_direct_input_allowed_networks").(*schema.Set).List() + opts.DirectInputAllowedNetworks = make([]string, len(DirectInputAllowedNetworks)) + for i, ipBlock := range DirectInputAllowedNetworks { + opts.DirectInputAllowedNetworks[i] = ipBlock.(string) + } + QueryAllowedNetworks := d.Get("initial_query_allowed_networks").(*schema.Set).List() + opts.QueryAllowedNetworks = make([]string, len(QueryAllowedNetworks)) + for i, ipBlock := range QueryAllowedNetworks { + opts.QueryAllowedNetworks[i] = ipBlock.(string) + } + res := &DbaasLogsOperation{} + if err := config.OVHClient.Put(endpoint, opts, res); err != nil { + return fmt.Errorf("Error calling Put %s:\n\t %q", endpoint, err) + } + + // Wait for operation status + if _, err := waitForDbaasLogsOperation(config.OVHClient, serviceName, res.OperationId); err != nil { + return err + } + + d.SetId("") + + return nil +} + +func resourceDbaasLogsClusterImportState(d *schema.ResourceData, meta interface{}) 
([]*schema.ResourceData, error) { + givenId := d.Id() + splitId := strings.SplitN(givenId, "/", 2) + if len(splitId) != 2 { + return nil, fmt.Errorf("Import Id is not service_name/id formatted") + } + serviceName := splitId[0] + id := splitId[1] + d.SetId(id) + d.Set("service_name", serviceName) + + results := make([]*schema.ResourceData, 1) + results[0] = d + return results, nil +} + +func resourceDbaasLogsClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + serviceName := d.Get("service_name").(string) + id := d.Id() + + log.Printf("[INFO] Will update dbaas logs cluster for: %s", serviceName) + + opts := (&DbaasLogsOpts{}).FromResource(d) + res := &DbaasLogsOperation{} + endpoint := fmt.Sprintf( + "/dbaas/logs/%s/cluster/%s", + url.PathEscape(serviceName), + url.PathEscape(id), + ) + + if err := config.OVHClient.Put(endpoint, opts, res); err != nil { + return fmt.Errorf("Error calling Put %s:\n\t %q", endpoint, err) + } + + // Wait for operation status + if _, err := waitForDbaasLogsOperation(config.OVHClient, serviceName, res.OperationId); err != nil { + return err + } + + return resourceDbaasLogsClusterRead(d, meta) +} + +func resourceDbaasLogsClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + serviceName := d.Get("service_name").(string) + cluster_id := d.Id() + + log.Printf("[DEBUG] Will read dbaas logs cluster %s", serviceName) + + endpoint := fmt.Sprintf( + "/dbaas/logs/%s/cluster/%s", + url.PathEscape(serviceName), + url.PathEscape(cluster_id), + ) + + res := map[string]interface{}{} + if err := config.OVHClient.Get(endpoint, &res); err != nil { + return fmt.Errorf("Error calling GET %s:\n\t %q", endpoint, err) + } + + d.Set("archive_allowed_networks", res["archiveAllowedNetworks"]) + d.Set("cluster_type", res["clusterType"]) + d.Set("dedicated_input_pem", res["dedicatedInputPEM"]) + d.Set("direct_input_allowed_networks", res["directInputAllowedNetworks"]) + 
d.Set("direct_input_pem", res["directInputPEM"]) + d.Set("hostname", res["hostname"]) + d.Set("is_default", res["isDefault"]) + d.Set("is_unlocked", res["isUnlocked"]) + d.Set("query_allowed_networks", res["queryAllowedNetworks"]) + d.Set("region", res["region"]) + + return nil +} diff --git a/ovh/resource_dbaas_logs_cluster_test.go b/ovh/resource_dbaas_logs_cluster_test.go new file mode 100644 index 000000000..21312e961 --- /dev/null +++ b/ovh/resource_dbaas_logs_cluster_test.go @@ -0,0 +1,74 @@ +package ovh + +import ( + "fmt" + "log" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("ovh_dbaas_logs_cluster", &resource.Sweeper{ + Name: "ovh_dbaas_logs_cluster", + F: testSweepDbaasLogsCluster, + }) +} + +func testSweepDbaasLogsCluster(region string) error { + serviceName := os.Getenv("OVH_DBAAS_LOGS_SERVICE_TEST") + if serviceName == "" { + log.Print("[DEBUG] OVH_DBAAS_LOGS_SERVICE_TEST is not set. No LDP cluster to sweep") + return nil + } + + // Nothing to sweep as LDP dedicated cluster can't be created/deleted thru API + + return nil +} + +func TestAccDbaasLogsCluster(t *testing.T) { + serviceName := os.Getenv("OVH_DBAAS_LOGS_SERVICE_TEST") + config := fmt.Sprintf( + testAccDbaasLogsClusterConfig, + serviceName, + ) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheckDbaasLogs(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ovh_dbaas_logs_cluster.ldp", "service_name", serviceName), + resource.TestCheckResourceAttrSet( + "ovh_dbaas_logs_cluster.ldp", "dedicated_input_pem"), + resource.TestCheckResourceAttrSet( + "ovh_dbaas_logs_cluster.ldp", "direct_input_pem"), + resource.TestCheckTypeSetElemAttr( + "ovh_dbaas_logs_cluster.ldp", "archive_allowed_networks.*", "10.0.0.0/16", + ), + resource.TestCheckTypeSetElemAttr( + 
"ovh_dbaas_logs_cluster.ldp", "direct_input_allowed_networks.*", "10.0.0.0/16", + ), + resource.TestCheckTypeSetElemAttr( + "ovh_dbaas_logs_cluster.ldp", "query_allowed_networks.*", "10.0.0.0/16", + ), + ), + }, + }, + }) +} + +const testAccDbaasLogsClusterConfig = ` +resource "ovh_dbaas_logs_cluster" "ldp" { + service_name = "%s" + + archive_allowed_networks = ["10.0.0.0/16"] + direct_input_allowed_networks = ["10.0.0.0/16"] + query_allowed_networks = ["10.0.0.0/16"] +} +` diff --git a/ovh/types_dbaas_logs.go b/ovh/types_dbaas_logs.go index 539d07504..8ceb15be8 100644 --- a/ovh/types_dbaas_logs.go +++ b/ovh/types_dbaas_logs.go @@ -1,6 +1,8 @@ package ovh -import () +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) type DbaasLogsInputEngine struct { Id string `json:"engineId"` @@ -32,3 +34,27 @@ type DbaasLogsOperation struct { StreamId *string `json:"streamId"` UpdatedAt string `json:"updatedAt"` } + +type DbaasLogsOpts struct { + ArchiveAllowedNetworks []string `json:"archiveAllowedNetworks"` + DirectInputAllowedNetworks []string `json:"directInputAllowedNetworks"` + QueryAllowedNetworks []string `json:"queryAllowedNetworks"` +} + +func convertNetworks(networks []interface{}) []string { + if networks == nil { + return nil + } + networksString := make([]string, len(networks)) + for i, net := range networks { + networksString[i] = net.(string) + } + return networksString +} + +func (opts *DbaasLogsOpts) FromResource(d *schema.ResourceData) *DbaasLogsOpts { + opts.ArchiveAllowedNetworks = convertNetworks(d.Get("archive_allowed_networks").(*schema.Set).List()) + opts.DirectInputAllowedNetworks = convertNetworks(d.Get("direct_input_allowed_networks").(*schema.Set).List()) + opts.QueryAllowedNetworks = convertNetworks(d.Get("query_allowed_networks").(*schema.Set).List()) + return opts +} diff --git a/website/docs/d/dbaas_logs_cluster.html.markdown b/website/docs/d/dbaas_logs_cluster.html.markdown new file mode 100644 index 000000000..a82f34f2b 
--- /dev/null +++ b/website/docs/d/dbaas_logs_cluster.html.markdown @@ -0,0 +1,37 @@ +--- +layout: "ovh" +page_title: "OVH: dbaas_logs_cluster" +sidebar_current: "docs-ovh-datasource-dbaas-logs" +description: |- + Get information about a DBaaS logs cluster tenant. +--- + +# ovh_dbaas_logs_cluster (Data Source) + +Use this data source to retrieve information about a DBaaS logs cluster tenant. + +## Example Usage + +```hcl +data "ovh_dbaas_logs_cluster" "logstash" { + service_name = "ldp-xx-xxxxx" +} +``` + +## Argument Reference + +* `service_name` - The service name. It's the ID of your Logs Data Platform instance. + +## Attributes Reference + +* `id` is set to the cluster ID +* `cluster_type` is type of cluster (DEDICATED, PRO or TRIAL) +* `dedicated_input_pem` is PEM for dedicated inputs +* `archive_allowed_networks` is allowed networks for ARCHIVE flow type +* `direct_input_allowed_networks` is allowed networks for DIRECT_INPUT flow type +* `direct_input_pem` is PEM for direct inputs +* `hostname` is cluster hostname hosting the tenant +* `is_default` is true if all content generated by given service will be placed on this cluster +* `is_unlocked` is true if given service can perform advanced operations on cluster +* `query_allowed_networks` is allowed networks for QUERY flow type +* `region` is datacenter localization diff --git a/website/docs/r/dbaas_logs_cluster.html.markdown b/website/docs/r/dbaas_logs_cluster.html.markdown new file mode 100644 index 000000000..aa994e7e5 --- /dev/null +++ b/website/docs/r/dbaas_logs_cluster.html.markdown @@ -0,0 +1,57 @@ +--- +layout: "ovh" +page_title: "OVH: ovh_dbaas_logs_cluster" +sidebar_current: "docs-ovh-resource-dbaas-logs-cluster" +description: |- + Manage an existing DBaaS logs cluster. +--- + +# ovh_dbaas_logs_cluster + +Reference a DBaaS logs cluster to manipulate ACL on a `DEDICATED` cluster +type. + +!> An LDP cluster can't be created and deleted via Terraform at this time. 
So when Terraform destroys the resource, it only actually restores it to its initial state. + +## Example Usage + +```hcl +resource "ovh_dbaas_logs_cluster" "ldp" { + service_name = "ldp-xx-xxxxx" + + archive_allowed_networks = ["10.0.0.0/16"] + direct_input_allowed_networks = ["10.0.0.0/16"] + query_allowed_networks = ["10.0.0.0/16"] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `service_name` - (Required) The service name. It's the ID of your Logs Data Platform instance. +* `archive_allowed_networks` - List of IP blocks +* `direct_input_allowed_networks` - List of IP blocks +* `query_allowed_networks` - List of IP blocks + +## Attributes Reference + +Id is set to the cluster Id. In addition, the following attributes are exported: +* `cluster_type` is type of cluster (DEDICATED, PRO or TRIAL) +* `dedicated_input_pem` is PEM for dedicated inputs +* `archive_allowed_networks` is allowed networks for ARCHIVE flow type +* `direct_input_allowed_networks` is allowed networks for DIRECT_INPUT flow type +* `direct_input_pem` is PEM for direct inputs +* `hostname` is cluster hostname hosting the tenant +* `is_default` is true if all content generated by given service will be placed on this cluster +* `is_unlocked` is true if given service can perform advanced operations on cluster +* `query_allowed_networks` is allowed networks for QUERY flow type +* `region` is datacenter localization + +## Import + +OVHcloud DBaaS Log Data Platform clusters can be imported using the `service_name` and `id` of the cluster, separated by "/", e.g., + +```bash +$ terraform import ovh_dbaas_logs_cluster.ldp service_name/id +```