diff --git a/ovh/data_dbaas_logs.go b/ovh/data_dbaas_logs.go new file mode 100644 index 000000000..b18ab2477 --- /dev/null +++ b/ovh/data_dbaas_logs.go @@ -0,0 +1,138 @@ +package ovh + +import ( + "fmt" + "log" + "net/url" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceDbaasLogs() *schema.Resource { + return &schema.Resource{ + Read: func(d *schema.ResourceData, meta interface{}) error { + return dataSourceDbaasLogsRead(d, meta) + }, + Schema: map[string]*schema.Schema{ + "service_name": { + Type: schema.TypeString, + Description: "The service name", + Required: true, + }, + "cluster_type": { + Type: schema.TypeString, + Description: "Cluster type", + Computed: true, + }, + "dedicated_input_pem": { + Type: schema.TypeString, + Description: "PEM for dedicated inputs", + Computed: true, + }, + "archive_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Allowed networks for ARCHIVE flow type", + Computed: true, + }, + "direct_input_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Allowed networks for DIRECT_INPUT flow type", + Computed: true, + }, + "direct_input_pem": { + Type: schema.TypeString, + Description: "PEM for direct inputs", + Computed: true, + }, + "hostname": { + Type: schema.TypeString, + Description: "hostname", + Computed: true, + }, + "is_default": { + Type: schema.TypeBool, + Description: "All content generated by given service will be placed on this cluster", + Computed: true, + }, + "is_unlocked": { + Type: schema.TypeBool, + Description: "Allow given service to perform advanced operations on cluster", + Computed: true, + }, + "query_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Allowed networks for QUERY flow type", + Computed: true, + }, + "region": { + Type: schema.TypeString, + Description: "Data center 
localization", + Computed: true, + }, + }, + } +} + +func dbaasGetClusterID(config *Config, serviceName string) (string, error) { + res := []string{} + + endpoint := fmt.Sprintf( + "/dbaas/logs/%s/cluster", + url.PathEscape(serviceName), + ) + + if err := config.OVHClient.Get(endpoint, &res); err != nil { + return "", fmt.Errorf("Error calling GET %s:\n\t %q", endpoint, err) + } + + return res[0], nil +} + +func dataSourceDbaasLogsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + serviceName := d.Get("service_name").(string) + + log.Printf("[DEBUG] Will read dbaas logs cluster %s", serviceName) + + cluster_id, err := dbaasGetClusterID(config, serviceName) + + if err != nil { + return fmt.Errorf("Error fetching info for %s:\n\t %q", serviceName, err) + } + + d.SetId(cluster_id) + + endpoint := fmt.Sprintf( + "/dbaas/logs/%s/cluster/%s", + url.PathEscape(serviceName), + url.PathEscape(cluster_id), + ) + + res := map[string]interface{}{} + if err := config.OVHClient.Get(endpoint, &res); err != nil { + return fmt.Errorf("Error calling GET %s:\n\t %q", endpoint, err) + } + + d.Set("archive_allowed_networks", res["archiveAllowedNetworks"]) + d.Set("cluster_type", res["clusterType"]) + d.Set("dedicated_input_pem", res["dedicatedInputPEM"]) + d.Set("direct_input_allowed_networks", res["directInputAllowedNetworks"]) + d.Set("direct_input_pem", res["directInputPEM"]) + d.Set("hostname", res["hostname"]) + d.Set("is_default", res["isDefault"]) + d.Set("is_unlocked", res["isUnlocked"]) + d.Set("query_allowed_networks", res["queryAllowedNetworks"]) + d.Set("region", res["region"]) + + return nil +} diff --git a/ovh/data_dbaas_logs_test.go b/ovh/data_dbaas_logs_test.go new file mode 100644 index 000000000..7ad977e68 --- /dev/null +++ b/ovh/data_dbaas_logs_test.go @@ -0,0 +1,47 @@ +package ovh + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +const 
testAccDataSourceDbaasLogs_dedicated = ` +data "ovh_dbaas_logs" "ldp" { + service_name = "%s" +} +` + +func TestAccDataSourceDbaasLogs_dedicated(t *testing.T) { + serviceName := os.Getenv("OVH_DBAAS_LOGS_SERVICE_TEST") + + config := fmt.Sprintf( + testAccDataSourceDbaasLogs_dedicated, + serviceName, + ) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheckDbaasLogs(t) }, + + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "data.ovh_dbaas_logs.ldp", + "cluster_type", + "DEDICATED", + ), + resource.TestCheckResourceAttr( + "data.ovh_dbaas_logs.ldp", + "region", + "GRA", + ), + ), + }, + }, + }) +} diff --git a/ovh/provider.go b/ovh/provider.go index 54f2fef7b..43800b7ec 100644 --- a/ovh/provider.go +++ b/ovh/provider.go @@ -2,10 +2,11 @@ package ovh import ( "context" - ini "gopkg.in/ini.v1" "os" "sync" + ini "gopkg.in/ini.v1" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mitchellh/go-homedir" @@ -86,6 +87,7 @@ func Provider() *schema.Provider { "ovh_cloud_project_user_s3_credentials": dataCloudProjectUserS3Credentials(), "ovh_cloud_project_user_s3_policy": dataCloudProjectUserS3Policy(), "ovh_cloud_project_users": datasourceCloudProjectUsers(), + "ovh_dbaas_logs": dataSourceDbaasLogs(), "ovh_dbaas_logs_input_engine": dataSourceDbaasLogsInputEngine(), "ovh_dbaas_logs_output_graylog_stream": dataSourceDbaasLogsOutputGraylogStream(), "ovh_dedicated_ceph": dataSourceDedicatedCeph(), @@ -154,6 +156,7 @@ func Provider() *schema.Provider { "ovh_cloud_project_user": resourceCloudProjectUser(), "ovh_cloud_project_user_s3_credential": resourceCloudProjectUserS3Credential(), "ovh_cloud_project_user_s3_policy": resourceCloudProjectUserS3Policy(), + "ovh_dbaas_logs_cluster": resourceDbaasLogsCluster(), "ovh_dbaas_logs_input": resourceDbaasLogsInput(),
"ovh_dbaas_logs_output_graylog_stream": resourceDbaasLogsOutputGraylogStream(), "ovh_dedicated_ceph_acl": resourceDedicatedCephACL(), diff --git a/ovh/resource_dbaas_logs_cluster.go b/ovh/resource_dbaas_logs_cluster.go new file mode 100644 index 000000000..1f96bd05f --- /dev/null +++ b/ovh/resource_dbaas_logs_cluster.go @@ -0,0 +1,213 @@ +package ovh + +import ( + "fmt" + "log" + "net/url" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceDbaasLogsCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceDbaasLogsClusterCreate, + Update: resourceDbaasLogsClusterUpdate, + Read: resourceDbaasLogsClusterRead, + Delete: resourceDbaasLogsClusterDelete, + Importer: &schema.ResourceImporter{ + State: resourceDbaasLogsClusterImportState, + }, + + Schema: resourceDbaasLogsClusterSchema(), + } +} + +func resourceDbaasLogsClusterSchema() map[string]*schema.Schema { + schema := map[string]*schema.Schema{ + "service_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "cluster_type": { + Type: schema.TypeString, + Description: "Cluster type", + Computed: true, + }, + "dedicated_input_pem": { + Type: schema.TypeString, + Description: "PEM for dedicated inputs", + Computed: true, + }, + "archive_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Allowed networks for ARCHIVE flow type", + Optional: true, + }, + "direct_input_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Allowed networks for DIRECT_INPUT flow type", + Optional: true, + }, + "direct_input_pem": { + Type: schema.TypeString, + Description: "PEM for direct inputs", + Computed: true, + }, + "hostname": { + Type: schema.TypeString, + Description: "hostname", + Computed: true, + }, + "is_default": { + Type: schema.TypeBool, + Description: "All content generated by given service will be placed on this 
cluster", + Computed: true, + }, + "is_unlocked": { + Type: schema.TypeBool, + Description: "Allow given service to perform advanced operations on cluster", + Computed: true, + }, + "query_allowed_networks": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "Allowed networks for QUERY flow type", + Optional: true, + }, + "region": { + Type: schema.TypeString, + Description: "Data center localization", + Computed: true, + }, + } + + return schema +} + +func resourceDbaasLogsClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + serviceName := d.Get("service_name").(string) + cluster_id, err := dbaasGetClusterID(config, serviceName) + if err != nil { + return fmt.Errorf("Error fetching info for %s:\n\t %q", serviceName, err) + } + d.SetId(cluster_id) + + return resourceDbaasLogsClusterUpdate(d, meta) +} + +func resourceDbaasLogsClusterDelete(d *schema.ResourceData, meta interface{}) error { + if false { + config := meta.(*Config) + + serviceName := d.Get("service_name").(string) + cluster_id := d.Id() + + endpoint := fmt.Sprintf( + "/dbaas/logs/%s/cluster/%s", + url.PathEscape(serviceName), + url.PathEscape(cluster_id), + ) + + opts := &DbaasLogsOpts{ + ArchiveAllowedNetworks: []string{}, + DirectInputAllowedNetworks: []string{}, + QueryAllowedNetworks: []string{}, + } + res := &DbaasLogsOperation{} + if err := config.OVHClient.Put(endpoint, opts, res); err != nil { + return fmt.Errorf("Error calling Put %s:\n\t %q", endpoint, err) + } + } + d.SetId("") + + return nil +} + +func resourceDbaasLogsClusterImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + givenId := d.Id() + splitId := strings.SplitN(givenId, "/", 2) + if len(splitId) != 2 { + return nil, fmt.Errorf("Import Id is not service_name/id formatted") + } + serviceName := splitId[0] + id := splitId[1] + d.SetId(id) + d.Set("service_name", serviceName) + + results := 
make([]*schema.ResourceData, 1) + results[0] = d + return results, nil +} + +func resourceDbaasLogsClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + serviceName := d.Get("service_name").(string) + id := d.Id() + + log.Printf("[INFO] Will update dbaas logs cluster for: %s", serviceName) + + opts := (&DbaasLogsOpts{}).FromResource(d) + res := &DbaasLogsOperation{} + endpoint := fmt.Sprintf( + "/dbaas/logs/%s/cluster/%s", + url.PathEscape(serviceName), + url.PathEscape(id), + ) + + if err := config.OVHClient.Put(endpoint, opts, res); err != nil { + return fmt.Errorf("Error calling Put %s:\n\t %q", endpoint, err) + } + + // Wait for operation status + if _, err := waitForDbaasLogsOperation(config.OVHClient, serviceName, res.OperationId); err != nil { + return err + } + + return resourceDbaasLogsClusterRead(d, meta) +} + +func resourceDbaasLogsClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + serviceName := d.Get("service_name").(string) + cluster_id := d.Id() + + log.Printf("[DEBUG] Will read dbaas logs cluster %s", serviceName) + + endpoint := fmt.Sprintf( + "/dbaas/logs/%s/cluster/%s", + url.PathEscape(serviceName), + url.PathEscape(cluster_id), + ) + + res := map[string]interface{}{} + if err := config.OVHClient.Get(endpoint, &res); err != nil { + return fmt.Errorf("Error calling GET %s:\n\t %q", endpoint, err) + } + + d.Set("archive_allowed_networks", res["archiveAllowedNetworks"]) + d.Set("cluster_type", res["clusterType"]) + d.Set("dedicated_input_pem", res["dedicatedInputPEM"]) + d.Set("direct_input_allowed_networks", res["directInputAllowedNetworks"]) + d.Set("direct_input_pem", res["directInputPEM"]) + d.Set("hostname", res["hostname"]) + d.Set("is_default", res["isDefault"]) + d.Set("is_unlocked", res["isUnlocked"]) + d.Set("query_allowed_networks", res["queryAllowedNetworks"]) + d.Set("region", res["region"]) + + return nil +} diff --git a/ovh/types_dbaas_logs.go 
b/ovh/types_dbaas_logs.go index 539d07504..8ceb15be8 100644 --- a/ovh/types_dbaas_logs.go +++ b/ovh/types_dbaas_logs.go @@ -1,6 +1,8 @@ package ovh -import () +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) type DbaasLogsInputEngine struct { Id string `json:"engineId"` @@ -32,3 +34,27 @@ type DbaasLogsOperation struct { StreamId *string `json:"streamId"` UpdatedAt string `json:"updatedAt"` } + +type DbaasLogsOpts struct { + ArchiveAllowedNetworks []string `json:"archiveAllowedNetworks"` + DirectInputAllowedNetworks []string `json:"directInputAllowedNetworks"` + QueryAllowedNetworks []string `json:"queryAllowedNetworks"` +} + +func convertNetworks(networks []interface{}) []string { + if networks == nil { + return nil + } + networksString := make([]string, len(networks)) + for i, net := range networks { + networksString[i] = net.(string) + } + return networksString +} + +func (opts *DbaasLogsOpts) FromResource(d *schema.ResourceData) *DbaasLogsOpts { + opts.ArchiveAllowedNetworks = convertNetworks(d.Get("archive_allowed_networks").(*schema.Set).List()) + opts.DirectInputAllowedNetworks = convertNetworks(d.Get("direct_input_allowed_networks").(*schema.Set).List()) + opts.QueryAllowedNetworks = convertNetworks(d.Get("query_allowed_networks").(*schema.Set).List()) + return opts +} diff --git a/website/docs/d/dbaas_logs.html.markdown b/website/docs/d/dbaas_logs.html.markdown new file mode 100644 index 000000000..31f9ef580 --- /dev/null +++ b/website/docs/d/dbaas_logs.html.markdown @@ -0,0 +1,37 @@ +--- +layout: "ovh" +page_title: "OVH: dbaas_logs" +sidebar_current: "docs-ovh-datasource-dbaas-logs" +description: |- + Get information of a DBaas logs tenant. +--- + +# ovh_dbaas_logs (Data Source) + +Use this data source to retrieve information about a DBaas logs tenant. + +## Example Usage + +```hcl +data "ovh_dbaas_logs" "logstash" { + service_name = "ldp-xx-xxxxx" +} +``` + +## Argument Reference + +* `service_name` - The service name.
It's the ID of your Logs Data Platform instance. + +## Attributes Reference + +* `id` is set to the cluster ID +* `cluster_type` is type of cluster (DEDICATED, PRO or TRIAL) +* `dedicated_input_pem` is PEM for dedicated inputs +* `archive_allowed_networks` is allowed networks for ARCHIVE flow type +* `direct_input_allowed_networks` is allowed networks for DIRECT_INPUT flow type +* `direct_input_pem` is PEM for direct inputs +* `hostname` is cluster hostname hosting the tenant +* `is_default` is true if all content generated by given service will be placed on this cluster +* `is_unlocked` is true if given service can perform advanced operations on cluster +* `query_allowed_networks` is allowed networks for QUERY flow type +* `region` is datacenter localization diff --git a/website/docs/r/dbaas_logs_cluster.html.markdown b/website/docs/r/dbaas_logs_cluster.html.markdown new file mode 100644 index 000000000..463bb1ef3 --- /dev/null +++ b/website/docs/r/dbaas_logs_cluster.html.markdown @@ -0,0 +1,48 @@ +--- +layout: "ovh" +page_title: "OVH: ovh_dbaas_logs_cluster" +sidebar_current: "docs-ovh-resource-dbaas-logs-cluster" +description: |- + Manage an existing dbaas logs cluster. +--- + +# ovh_dbaas_logs_cluster + +Reference a dbaas logs cluster to manipulate ACL. + +As LDP cluster can't be created through API, create and delete +operations are no-ops. + +## Example Usage + +```hcl +resource "ovh_dbaas_logs_cluster" "ldp" { + service_name = "ldp-ld-45517" + + archive_allowed_networks = ["10.0.0.0/16"] + direct_input_allowed_networks = ["10.0.0.0/16"] + query_allowed_networks = ["10.0.0.0/16"] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `archive_allowed_networks` - List of IP blocks +* `direct_input_allowed_networks` - List of IP blocks +* `query_allowed_networks` - List of IP blocks + +## Attributes Reference + +Id is set to the cluster Id.
In addition, the following attributes are exported: +* `cluster_type` is type of cluster (DEDICATED, PRO or TRIAL) +* `dedicated_input_pem` is PEM for dedicated inputs +* `archive_allowed_networks` is allowed networks for ARCHIVE flow type +* `direct_input_allowed_networks` is allowed networks for DIRECT_INPUT flow type +* `direct_input_pem` is PEM for direct inputs +* `hostname` is cluster hostname hosting the tenant +* `is_default` is true if all content generated by given service will be placed on this cluster +* `is_unlocked` is true if given service can perform advanced operations on cluster +* `query_allowed_networks` is allowed networks for QUERY flow type +* `region` is datacenter localization