Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

provider/aws: Add support for S3 logging. #4482

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
86 changes: 86 additions & 0 deletions builtin/providers/aws/resource_aws_s3_bucket.go
Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,30 @@ func resourceAwsS3Bucket() *schema.Resource {
},
},

"logging": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"target_bucket": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"target_prefix": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
},
Set: func(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
buf.WriteString(fmt.Sprintf("%s-", m["target_bucket"]))
buf.WriteString(fmt.Sprintf("%s-", m["target_prefix"]))
return hashcode.String(buf.String())
},
},

"tags": tagsSchema(),

"force_destroy": &schema.Schema{
Expand Down Expand Up @@ -231,6 +255,12 @@ func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error {
}
}

if d.HasChange("logging") {
if err := resourceAwsS3BucketLoggingUpdate(s3conn, d); err != nil {
return err
}
}

return resourceAwsS3BucketRead(d, meta)
}

Expand Down Expand Up @@ -341,6 +371,29 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
}
}

// Read the logging configuration
logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{
Bucket: aws.String(d.Id()),
})
if err != nil {
return err
}
log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging)
if v := logging.LoggingEnabled; v != nil {
lcl := make([]map[string]interface{}, 0, 1)
lc := make(map[string]interface{})
if *v.TargetBucket != "" {
lc["target_bucket"] = *v.TargetBucket
}
if *v.TargetPrefix != "" {
lc["target_prefix"] = *v.TargetPrefix
}
lcl = append(lcl, lc)
if err := d.Set("logging", lcl); err != nil {
return err
}
}

// Add the region as an attribute
location, err := s3conn.GetBucketLocation(
&s3.GetBucketLocationInput{
Expand Down Expand Up @@ -726,6 +779,39 @@ func resourceAwsS3BucketVersioningUpdate(s3conn *s3.S3, d *schema.ResourceData)
return nil
}

// resourceAwsS3BucketLoggingUpdate synchronizes the resource's "logging"
// block with S3 via PutBucketLogging. When the set is non-empty its single
// element supplies target_bucket/target_prefix; when it is empty, an empty
// BucketLoggingStatus is sent, which disables access logging on the bucket.
func resourceAwsS3BucketLoggingUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	rules := d.Get("logging").(*schema.Set).List()
	bucket := d.Get("bucket").(string)

	status := &s3.BucketLoggingStatus{}
	if len(rules) > 0 {
		// The schema allows at most one meaningful logging configuration;
		// only the first set element is consulted.
		rule := rules[0].(map[string]interface{})

		enabled := &s3.LoggingEnabled{}
		if v, ok := rule["target_bucket"]; ok {
			enabled.TargetBucket = aws.String(v.(string))
		}
		if v, ok := rule["target_prefix"]; ok {
			enabled.TargetPrefix = aws.String(v.(string))
		}
		status.LoggingEnabled = enabled
	}

	input := &s3.PutBucketLoggingInput{
		Bucket:              aws.String(bucket),
		BucketLoggingStatus: status,
	}
	log.Printf("[DEBUG] S3 put bucket logging: %#v", input)

	if _, err := s3conn.PutBucketLogging(input); err != nil {
		return fmt.Errorf("Error putting S3 logging: %s", err)
	}

	return nil
}

func normalizeJson(jsonString interface{}) string {
if jsonString == nil {
return ""
Expand Down
72 changes: 72 additions & 0 deletions builtin/providers/aws/resource_aws_s3_bucket_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -255,6 +255,24 @@ func TestAccAWSS3Bucket_Cors(t *testing.T) {
})
}

// TestAccAWSS3Bucket_Logging is an acceptance test: it applies the
// testAccAWSS3BucketConfigWithLogging configuration (a target log bucket with
// the log-delivery-write ACL plus a logged bucket), then verifies via the S3
// API that the bucket exists and that its logging points at the log bucket
// with the "log/" key prefix.
func TestAccAWSS3Bucket_Logging(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSS3BucketDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSS3BucketConfigWithLogging,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"),
testAccCheckAWSS3BucketLogging(
"aws_s3_bucket.bucket", "aws_s3_bucket.log_bucket", "log/"),
),
},
},
})
}

func testAccCheckAWSS3BucketDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).s3conn

Expand Down Expand Up @@ -461,6 +479,45 @@ func testAccCheckAWSS3BucketCors(n string, corsRules []*s3.CORSRule) resource.Te
}
}

// testAccCheckAWSS3BucketLogging returns a check that the bucket behind
// resource name n has access logging enabled, targeting the bucket behind
// resource name b with key prefix p, as reported by GetBucketLogging.
//
// Fixes over the previous version: the `ok` results of the state map lookups
// are no longer discarded (a missing resource now yields a test error instead
// of a nil-pointer panic), and out.LoggingEnabled is nil-checked before its
// fields are dereferenced (GetBucketLogging returns a nil LoggingEnabled when
// logging is not configured).
func testAccCheckAWSS3BucketLogging(n, b, p string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		conn := testAccProvider.Meta().(*AWSClient).s3conn

		out, err := conn.GetBucketLogging(&s3.GetBucketLoggingInput{
			Bucket: aws.String(rs.Primary.ID),
		})
		if err != nil {
			return fmt.Errorf("GetBucketLogging error: %v", err)
		}

		if out.LoggingEnabled == nil {
			return fmt.Errorf("logging not enabled for bucket: %s", rs.Primary.ID)
		}

		tb, ok := s.RootModule().Resources[b]
		if !ok {
			return fmt.Errorf("Not found: %s", b)
		}

		if v := out.LoggingEnabled.TargetBucket; v == nil {
			if tb.Primary.ID != "" {
				return fmt.Errorf("bad target bucket, found nil, expected: %s", tb.Primary.ID)
			}
		} else if *v != tb.Primary.ID {
			return fmt.Errorf("bad target bucket, expected: %s, got %s", tb.Primary.ID, *v)
		}

		if v := out.LoggingEnabled.TargetPrefix; v == nil {
			if p != "" {
				return fmt.Errorf("bad target prefix, found nil, expected: %s", p)
			}
		} else if *v != p {
			return fmt.Errorf("bad target prefix, expected: %s, got %s", p, *v)
		}

		return nil
	}
}

// These need a bit of randomness as the name can only be used once globally
// within AWS
var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
Expand Down Expand Up @@ -570,3 +627,18 @@ resource "aws_s3_bucket" "bucket" {
acl = "private"
}
`

// testAccAWSS3BucketConfigWithLogging is the HCL config used by
// TestAccAWSS3Bucket_Logging: a log-delivery-write target bucket and a
// private bucket whose logging block points at it with prefix "log/".
// Bucket names embed randInt because S3 bucket names are globally unique.
var testAccAWSS3BucketConfigWithLogging = fmt.Sprintf(`
resource "aws_s3_bucket" "log_bucket" {
bucket = "tf-test-log-bucket-%d"
acl = "log-delivery-write"
}
resource "aws_s3_bucket" "bucket" {
bucket = "tf-test-bucket-%d"
acl = "private"
logging {
target_bucket = "${aws_s3_bucket.log_bucket.id}"
target_prefix = "log/"
}
}
`, randInt, randInt)
23 changes: 23 additions & 0 deletions website/source/docs/providers/aws/r/s3_bucket.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,23 @@ resource "aws_s3_bucket" "b" {
}
```

### Enable Logging

```
resource "aws_s3_bucket" "log_bucket" {
bucket = "my_tf_log_bucket"
acl = "log-delivery-write"
}
resource "aws_s3_bucket" "b" {
bucket = "my_tf_test_bucket"
acl = "private"
logging {
target_bucket = "${aws_s3_bucket.log_bucket.id}"
target_prefix = "log/"
}
}
```

## Argument Reference

The following arguments are supported:
Expand All @@ -83,6 +100,7 @@ The following arguments are supported:
* `website` - (Optional) A website object (documented below).
* `cors_rule` - (Optional) A rule of [Cross-Origin Resource Sharing](http://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) (documented below).
* `versioning` - (Optional) A state of [versioning](http://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below)
* `logging` - (Optional) A configuration of [bucket logging](http://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) (documented below).

The website object supports the following:

Expand All @@ -102,6 +120,11 @@ The versioning supports the following:

* `enabled` - (Optional) Enable versioning. Once you version-enable a bucket, it can never return to an unversioned state. You can, however, suspend versioning on that bucket.

The logging object supports the following:

* `target_bucket` - (Required) The name of the bucket that will receive the log objects.
* `target_prefix` - (Optional) A key prefix to assign to log objects.

## Attributes Reference

The following attributes are exported:
Expand Down