Storage transfer POSIX data source and sink (#5652) (#4029)
* on premise transfer

* Add posix data sink

* Add ExactlyOneOf and docs

Signed-off-by: Modular Magician <[email protected]>
modular-magician authored Feb 4, 2022
1 parent 73db072 commit 439bd84
Showing 4 changed files with 264 additions and 10 deletions.
3 changes: 3 additions & 0 deletions .changelog/5652.txt
@@ -0,0 +1,3 @@
```release-note:enhancement
storagetransfer: added support for POSIX data source and data sink to `google_storage_transfer_job` via `transfer_spec.posix_data_source` and `transfer_spec.posix_data_sink` fields
```
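
For orientation, here is a minimal configuration sketch exercising the new fields, modeled on the acceptance tests added in this change; the project, bucket name, and root directory are placeholders, and the IAM grants for the Storage Transfer service account are omitted:

```hcl
# Sketch only: transfers a local POSIX directory into a Cloud Storage bucket.
# "my-project", "my-sink-bucket", and "/local/export" are placeholders.
resource "google_storage_transfer_job" "posix_to_gcs" {
  description = "Transfer a POSIX filesystem directory to Cloud Storage"
  project     = "my-project"

  transfer_spec {
    # Exactly one data source and exactly one data sink may be set.
    posix_data_source {
      root_directory = "/local/export"
    }
    gcs_data_sink {
      bucket_name = "my-sink-bucket"
      path        = "foo/bar/"
    }
  }

  schedule {
    schedule_start_date {
      year  = 2022
      month = 2
      day   = 4
    }
  }
}
```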
80 changes: 71 additions & 9 deletions google-beta/resource_storage_transfer_job.go
@@ -32,6 +32,11 @@ var (
"transfer_spec.0.aws_s3_data_source",
"transfer_spec.0.http_data_source",
"transfer_spec.0.azure_blob_storage_data_source",
"transfer_spec.0.posix_data_source",
}
transferSpecDataSinkKeys = []string{
"transfer_spec.0.gcs_data_sink",
"transfer_spec.0.posix_data_sink",
}
awsS3AuthKeys = []string{
"transfer_spec.0.aws_s3_data_source.0.aws_access_key",
@@ -77,11 +82,20 @@ func resourceStorageTransferJob() *schema.Resource {
"object_conditions": objectConditionsSchema(),
"transfer_options": transferOptionsSchema(),
"gcs_data_sink": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: gcsDataSchema(),
Description: `A Google Cloud Storage data sink.`,
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: gcsDataSchema(),
ExactlyOneOf: transferSpecDataSinkKeys,
Description: `A Google Cloud Storage data sink.`,
},
"posix_data_sink": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: posixDataSchema(),
ExactlyOneOf: transferSpecDataSinkKeys,
Description: `A POSIX filesystem data sink.`,
},
"gcs_data_source": {
Type: schema.TypeList,
@@ -107,6 +121,14 @@ func resourceStorageTransferJob() *schema.Resource {
ExactlyOneOf: transferSpecDataSourceKeys,
Description: `A HTTP URL data source.`,
},
"posix_data_source": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: posixDataSchema(),
ExactlyOneOf: transferSpecDataSourceKeys,
Description: `A POSIX filesystem data source.`,
},
"azure_blob_storage_data_source": {
Type: schema.TypeList,
Optional: true,
@@ -397,6 +419,18 @@ func httpDataSchema() *schema.Resource {
}
}

func posixDataSchema() *schema.Resource {
return &schema.Resource{
Schema: map[string]*schema.Schema{
"root_directory": {
Type: schema.TypeString,
Required: true,
Description: `Root directory path to the filesystem.`,
},
},
}
}

func azureBlobStorageDataSchema() *schema.Resource {
return &schema.Resource{
Schema: map[string]*schema.Schema{
@@ -499,7 +533,6 @@ func resourceStorageTransferJobRead(d *schema.ResourceData, meta interface{}) er
if err != nil {
return handleNotFoundError(err, d, fmt.Sprintf("Transfer Job %q", name))
}
log.Printf("[DEBUG] Read transfer job: %v in project: %v \n\n", res.Name, res.ProjectId)

if err := d.Set("project", res.ProjectId); err != nil {
return fmt.Errorf("Error setting project: %s", err)
@@ -847,6 +880,25 @@ func flattenHttpData(httpData *storagetransfer.HttpData) []map[string]interface{
return []map[string]interface{}{data}
}

func expandPosixData(posixDatas []interface{}) *storagetransfer.PosixFilesystem {
if len(posixDatas) == 0 || posixDatas[0] == nil {
return nil
}

posixData := posixDatas[0].(map[string]interface{})
return &storagetransfer.PosixFilesystem{
RootDirectory: posixData["root_directory"].(string),
}
}

func flattenPosixData(posixData *storagetransfer.PosixFilesystem) []map[string]interface{} {
data := map[string]interface{}{
"root_directory": posixData.RootDirectory,
}

return []map[string]interface{}{data}
}

func expandAzureCredentials(azureCredentials []interface{}) *storagetransfer.AzureCredentials {
if len(azureCredentials) == 0 || azureCredentials[0] == nil {
return nil
@@ -930,6 +982,9 @@ func expandTransferOptions(options []interface{}) *storagetransfer.TransferOptio
}

func flattenTransferOption(option *storagetransfer.TransferOptions) []map[string]interface{} {
if option.DeleteObjectsFromSourceAfterTransfer == false && option.DeleteObjectsUniqueInSink == false && option.OverwriteObjectsAlreadyExistingInSink == false {
return nil
}
data := map[string]interface{}{
"delete_objects_from_source_after_transfer": option.DeleteObjectsFromSourceAfterTransfer,
"delete_objects_unique_in_sink": option.DeleteObjectsUniqueInSink,
@@ -947,19 +1002,24 @@ func expandTransferSpecs(transferSpecs []interface{}) *storagetransfer.TransferS
transferSpec := transferSpecs[0].(map[string]interface{})
return &storagetransfer.TransferSpec{
GcsDataSink: expandGcsData(transferSpec["gcs_data_sink"].([]interface{})),
PosixDataSink: expandPosixData(transferSpec["posix_data_sink"].([]interface{})),
ObjectConditions: expandObjectConditions(transferSpec["object_conditions"].([]interface{})),
TransferOptions: expandTransferOptions(transferSpec["transfer_options"].([]interface{})),
GcsDataSource: expandGcsData(transferSpec["gcs_data_source"].([]interface{})),
AwsS3DataSource: expandAwsS3Data(transferSpec["aws_s3_data_source"].([]interface{})),
HttpDataSource: expandHttpData(transferSpec["http_data_source"].([]interface{})),
AzureBlobStorageDataSource: expandAzureBlobStorageData(transferSpec["azure_blob_storage_data_source"].([]interface{})),
PosixDataSource: expandPosixData(transferSpec["posix_data_source"].([]interface{})),
}
}

func flattenTransferSpec(transferSpec *storagetransfer.TransferSpec, d *schema.ResourceData) []map[string][]map[string]interface{} {

data := map[string][]map[string]interface{}{
"gcs_data_sink": flattenGcsData(transferSpec.GcsDataSink),
data := map[string][]map[string]interface{}{}
if transferSpec.GcsDataSink != nil {
data["gcs_data_sink"] = flattenGcsData(transferSpec.GcsDataSink)
}
if transferSpec.PosixDataSink != nil {
data["posix_data_sink"] = flattenPosixData(transferSpec.PosixDataSink)
}

if transferSpec.ObjectConditions != nil {
@@ -976,6 +1036,8 @@ func flattenTransferSpec(transferSpec *storagetransfer.TransferSpec, d *schema.R
data["http_data_source"] = flattenHttpData(transferSpec.HttpDataSource)
} else if transferSpec.AzureBlobStorageDataSource != nil {
data["azure_blob_storage_data_source"] = flattenAzureBlobStorageData(transferSpec.AzureBlobStorageDataSource, d)
} else if transferSpec.PosixDataSource != nil {
data["posix_data_source"] = flattenPosixData(transferSpec.PosixDataSource)
}

return []map[string][]map[string]interface{}{data}
177 changes: 177 additions & 0 deletions google-beta/resource_storage_transfer_job_test.go
@@ -91,6 +91,52 @@ func TestAccStorageTransferJob_omitScheduleEndDate(t *testing.T) {
})
}

func TestAccStorageTransferJob_posixSource(t *testing.T) {
t.Parallel()

testDataSinkName := randString(t, 10)
testTransferJobDescription := randString(t, 10)

vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccStorageTransferJobDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccStorageTransferJob_posixSource(getTestProjectFromEnv(), testDataSinkName, testTransferJobDescription),
},
{
ResourceName: "google_storage_transfer_job.transfer_job",
ImportState: true,
ImportStateVerify: true,
},
},
})
}

func TestAccStorageTransferJob_posixSink(t *testing.T) {
t.Parallel()

testDataSourceName := randString(t, 10)
testTransferJobDescription := randString(t, 10)

vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccStorageTransferJobDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccStorageTransferJob_posixSink(getTestProjectFromEnv(), testDataSourceName, testTransferJobDescription),
},
{
ResourceName: "google_storage_transfer_job.transfer_job",
ImportState: true,
ImportStateVerify: true,
},
},
})
}

func testAccStorageTransferJobDestroyProducer(t *testing.T) func(s *terraform.State) error {
return func(s *terraform.State) error {
config := googleProviderConfig(t)
@@ -319,3 +365,134 @@ resource "google_storage_transfer_job" "transfer_job" {
}
`, project, dataSourceBucketName, project, dataSinkBucketName, project, transferJobDescription, project)
}

func testAccStorageTransferJob_posixSource(project string, dataSinkBucketName string, transferJobDescription string) string {
return fmt.Sprintf(`
data "google_storage_transfer_project_service_account" "default" {
project = "%s"
}
resource "google_storage_bucket" "data_sink" {
name = "%s"
project = "%s"
location = "US"
force_destroy = true
}
resource "google_storage_bucket_iam_member" "data_sink" {
bucket = google_storage_bucket.data_sink.name
role = "roles/storage.admin"
member = "serviceAccount:${data.google_storage_transfer_project_service_account.default.email}"
}
resource "google_project_iam_member" "pubsub" {
project = data.google_storage_transfer_project_service_account.default.project
role = "roles/pubsub.admin"
member = "serviceAccount:${data.google_storage_transfer_project_service_account.default.email}"
}
resource "google_storage_transfer_job" "transfer_job" {
description = "%s"
project = "%s"
transfer_spec {
posix_data_source {
root_directory = "/some/path"
}
gcs_data_sink {
bucket_name = google_storage_bucket.data_sink.name
path = "foo/bar/"
}
}
schedule {
schedule_start_date {
year = 2018
month = 10
day = 1
}
schedule_end_date {
year = 2019
month = 10
day = 1
}
start_time_of_day {
hours = 0
minutes = 30
seconds = 0
nanos = 0
}
}
depends_on = [
google_storage_bucket_iam_member.data_sink,
google_project_iam_member.pubsub
]
}
`, project, dataSinkBucketName, project, transferJobDescription, project)
}

func testAccStorageTransferJob_posixSink(project string, dataSourceBucketName string, transferJobDescription string) string {
return fmt.Sprintf(`
data "google_storage_transfer_project_service_account" "default" {
project = "%s"
}
resource "google_storage_bucket" "data_source" {
name = "%s"
project = "%s"
location = "US"
force_destroy = true
}
resource "google_storage_bucket_iam_member" "data_source" {
bucket = google_storage_bucket.data_source.name
role = "roles/storage.admin"
member = "serviceAccount:${data.google_storage_transfer_project_service_account.default.email}"
}
resource "google_project_iam_member" "pubsub" {
project = data.google_storage_transfer_project_service_account.default.project
role = "roles/pubsub.admin"
member = "serviceAccount:${data.google_storage_transfer_project_service_account.default.email}"
}
resource "google_storage_transfer_job" "transfer_job" {
description = "%s"
project = "%s"
transfer_spec {
posix_data_sink {
root_directory = "/some/path"
}
gcs_data_source {
bucket_name = google_storage_bucket.data_source.name
}
}
schedule {
schedule_start_date {
year = 2018
month = 10
day = 1
}
schedule_end_date {
year = 2019
month = 10
day = 1
}
start_time_of_day {
hours = 0
minutes = 30
seconds = 0
nanos = 0
}
}
depends_on = [
google_storage_bucket_iam_member.data_source,
google_project_iam_member.pubsub
]
}
`, project, dataSourceBucketName, project, transferJobDescription, project)
}
14 changes: 13 additions & 1 deletion website/docs/r/storage_transfer_job.html.markdown
@@ -110,14 +110,18 @@ The following arguments are supported:

<a name="nested_transfer_spec"></a>The `transfer_spec` block supports:

* `gcs_data_sink` - (Required) A Google Cloud Storage data sink. Structure [documented below](#nested_gcs_data_sink).
* `gcs_data_sink` - (Optional) A Google Cloud Storage data sink. Structure [documented below](#nested_gcs_data_sink).

* `posix_data_sink` - (Optional) A POSIX filesystem data sink. Structure [documented below](#nested_posix_data_sink).

* `object_conditions` - (Optional) Only objects that satisfy these object conditions are included in the set of data source and data sink objects. Object conditions based on objects' `last_modification_time` do not exclude objects in a data sink. Structure [documented below](#nested_object_conditions).

* `transfer_options` - (Optional) Characteristics of how to treat files from the data source and data sink during a transfer job. If the option `delete_objects_unique_in_sink` is true, object conditions based on objects' `last_modification_time` are ignored and do not exclude objects in a data source or a data sink. Structure [documented below](#nested_transfer_options).

* `gcs_data_source` - (Optional) A Google Cloud Storage data source. Structure [documented below](#nested_gcs_data_source).

* `posix_data_source` - (Optional) A POSIX filesystem data source. Structure [documented below](#nested_posix_data_source).

* `aws_s3_data_source` - (Optional) An AWS S3 data source. Structure [documented below](#nested_aws_s3_data_source).

* `http_data_source` - (Optional) A HTTP URL data source. Structure [documented below](#nested_http_data_source).
@@ -164,6 +168,14 @@ A duration in seconds with up to nine fractional digits, terminated by 's'. Exam

* `path` - (Optional) Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'.

<a name="nested_posix_data_sink"></a>The `posix_data_sink` block supports:

* `root_directory` - (Required) Root directory path to the filesystem.

<a name="nested_posix_data_source"></a>The `posix_data_source` block supports:

* `root_directory` - (Required) Root directory path to the filesystem.
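
As a quick illustration, here is a minimal sketch of the reverse direction (Cloud Storage bucket to POSIX filesystem), adapted from the acceptance tests added alongside this change; the block goes inside a `google_storage_transfer_job` resource, and the bucket name and directory are placeholders:

```hcl
# Inside a google_storage_transfer_job resource; names are placeholders.
transfer_spec {
  gcs_data_source {
    bucket_name = "my-source-bucket"
  }
  posix_data_sink {
    root_directory = "/mnt/restore"
  }
}
```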

<a name="nested_aws_s3_data_source"></a>The `aws_s3_data_source` block supports:

* `bucket_name` - (Required) S3 Bucket name.
