diff --git a/go.mod b/go.mod index 050b14db5..42b9b3aa7 100644 --- a/go.mod +++ b/go.mod @@ -23,6 +23,7 @@ require ( github.com/aws/aws-sdk-go-v2 v1.32.7 github.com/aws/aws-sdk-go-v2/config v1.27.18 github.com/aws/aws-sdk-go-v2/service/ec2 v1.142.0 + github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 github.com/pulumi/pulumi-aws-native/sdk v1.17.0 github.com/pulumi/pulumi-aws/sdk/v6 v6.64.0 github.com/pulumi/pulumi-awsx/sdk/v2 v2.19.0 @@ -38,6 +39,10 @@ require ( require ( github.com/BurntSushi/toml v1.2.1 // indirect github.com/aws/aws-sdk-go v1.50.36 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect github.com/aws/aws-sdk-go-v2/service/pricing v1.21.6 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/evertras/bubble-table v0.15.2 // indirect diff --git a/go.sum b/go.sum index 2eeba6d33..c377c26f5 100644 --- a/go.sum +++ b/go.sum @@ -46,6 +46,8 @@ github.com/aws/aws-sdk-go v1.50.36/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3Tj github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= github.com/aws/aws-sdk-go-v2 v1.32.7 h1:ky5o35oENWi0JYWUZkB7WYvVPP+bcRF5/Iq7JWSb5Rw= github.com/aws/aws-sdk-go-v2 v1.32.7/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= github.com/aws/aws-sdk-go-v2/config v1.27.18 h1:wFvAnwOKKe7QAyIxziwSKjmer9JBMH1vzIL6W+fYuKk= github.com/aws/aws-sdk-go-v2/config v1.27.18/go.mod h1:0xz6cgdX55+kmppvPm2IaKzIXOheGJhAufacPJaXZ7c= github.com/aws/aws-sdk-go-v2/credentials v1.17.18 h1:D/ALDWqK4JdY3OFgA2thcPO1c9aYTT5STS/CvnkqY1c= @@ -60,6 +62,8 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26 h1:zXFLuEuMMUOvEARXFU github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.26/go.mod h1:3o2Wpy0bogG1kyOPrgkXA8pgIfEEv0+m19O9D5+W8y8= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0= github.com/aws/aws-sdk-go-v2/service/ec2 v1.142.0 h1:VrFC1uEZjX4ghkm/et8ATVGb1mT75Iv8aPKPjUE+F8A= github.com/aws/aws-sdk-go-v2/service/ec2 v1.142.0/go.mod h1:qjhtI9zjpUHRc6khtrIM9fb48+ii6+UikL3/b+MKYn0= github.com/aws/aws-sdk-go-v2/service/ecs v1.53.0 h1:TCQZX4ztlcWXAcZouKh9qJMcVaH/qTidFTfsvJwUI30= @@ -68,10 +72,16 @@ github.com/aws/aws-sdk-go-v2/service/iam v1.38.3 h1:2sFIoFzU1IEL9epJWubJm9Dhrn45 github.com/aws/aws-sdk-go-v2/service/iam v1.38.3/go.mod h1:KzlNINwfr/47tKkEhgk0r10/OZq3rjtyWy0txL3lM+I= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod 
h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11 h1:o4T+fKxA3gTMcluBNZZXE9DNaMkJuUL1O3mffCUjoJo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11/go.mod h1:84oZdJ+VjuJKs9v1UTC9NaodRZRseOXCTgku+vQJWR8=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ=
github.com/aws/aws-sdk-go-v2/service/pricing v1.21.6 h1:k/f3T13s7wx/By6aKovlVsjdNkRVT0QRR2RlZEvaTGg=
github.com/aws/aws-sdk-go-v2/service/pricing v1.21.6/go.mod h1:9n3tkRCngy3+Iw/8vK3C69iXh22SCGsy3yn16nTxH+s=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.11 h1:gEYM2GSpr4YNWc6hCd5nod4+d4kd9vWIAWrmGuLdlMw=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.11/go.mod h1:gVvwPdPNYehHSP9Rs7q27U1EU+3Or2ZpXvzAYJNh63w=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5 h1:iXjh3uaH3vsVcnyZX7MqCoCfcyxIrVE9iOQruRaWPrQ=
diff --git a/pkg/manager/context/context.go b/pkg/manager/context/context.go
index 281d5fd1a..d30e8c7f5 100644
--- a/pkg/manager/context/context.go
+++ b/pkg/manager/context/context.go
@@ -2,10 +2,12 @@ package context
 
 import (
 	"fmt"
+	"os"
 
 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 	"github.com/redhat-developer/mapt/pkg/integrations/cirrus"
 	"github.com/redhat-developer/mapt/pkg/integrations/github"
+	"github.com/redhat-developer/mapt/pkg/provider/aws/data"
 	"github.com/redhat-developer/mapt/pkg/util"
 	"github.com/redhat-developer/mapt/pkg/util/logging"
 	utilMaps "github.com/redhat-developer/mapt/pkg/util/maps"
@@ -71,15 +73,13 @@ func Init(ca *ContextArgs) error {
 		serverless: ca.Serverless,
 	}
 	addCommonTags()
-	// Manage integrations
-	if ca.GHRunnerArgs != nil {
-		if err := github.InitGHRunnerArgs(ca.GHRunnerArgs); err != nil {
-			return err
-		}
+	// Manage remote state requirements
+	if err := manageRemoteState(ca.BackedURL); err != nil {
+		return err
 	}
-	if ca.CirrusPWArgs != nil {
-		ca.CirrusPWArgs.Name = RunID()
-		cirrus.Init(ca.CirrusPWArgs)
+	// Manage integrations
+	if err := manageIntegration(ca); err != nil {
+		return err
 	}
 	logging.Debugf("context initialized for %s", mc.runID)
 	return nil
@@ -141,3 +141,38 @@ func addCommonTags() {
 	mc.tags[tagKeyOrigin] = origin
 	mc.tags[TagKeyProjectName] = mc.projectName
}
+
+// Under some circumstances it is possible we need to update the initial
+// location configuration due to the use of a remote backed URL; see
+// https://github.com/redhat-developer/mapt/issues/392.
+//
+// manageRemoteState checks whether the backed URL is remote (an s3:// path)
+// and, if so, updates the environment so the remote backend can be used.
+func manageRemoteState(backedURL string) error {
+	if !data.ValidateS3Path(backedURL) {
+		return nil
+	}
+	awsRegion, err := data.GetBucketLocationFromS3Path(backedURL)
+	if err != nil {
+		return err
+	}
+	if err := os.Setenv("AWS_DEFAULT_REGION", *awsRegion); err != nil {
+		return err
+	}
+	return os.Setenv("AWS_REGION", *awsRegion)
+}
+
+func manageIntegration(ca *ContextArgs) error {
+	if ca.GHRunnerArgs != nil {
+		if err := github.InitGHRunnerArgs(ca.GHRunnerArgs); err != nil {
+			return err
+		}
+	}
+	if ca.CirrusPWArgs != nil {
+		ca.CirrusPWArgs.Name = RunID()
+		cirrus.Init(ca.CirrusPWArgs)
+	}
+	return nil
+}
diff --git a/pkg/provider/aws/data/s3.go b/pkg/provider/aws/data/s3.go
new file mode 100644
index 000000000..cb5477150
--- /dev/null
+++ b/pkg/provider/aws/data/s3.go
@@ -0,0 +1,51 @@
+package data
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+)
+
+const (
+	s3Prefix        = "s3://"
+	s3PathSeparator = "/"
+)
+
+// ValidateS3Path reports whether p is an s3:// path.
+func ValidateS3Path(p string) bool { return strings.HasPrefix(p, s3Prefix) }
+
+// GetBucketLocationFromS3Path returns the region of the bucket referenced by
+// an s3:// path.
+func GetBucketLocationFromS3Path(p string) (*string, error) {
+	bucket, err := getBucketFromS3Path(p)
+	if err != nil {
+		return nil, err
+	}
+	return GetBucketLocation(*bucket)
+}
+
+// GetBucketLocation returns the region the named bucket resides in.
+func GetBucketLocation(bucketName string) (*string, error) {
+	cfg, err := getGlobalConfig()
+	if err != nil {
+		return nil, err
+	}
+	client := s3.NewFromConfig(cfg)
+	output, err := client.GetBucketLocation(
+		context.Background(),
+		&s3.GetBucketLocationInput{
+			Bucket: &bucketName,
+		})
+	if err != nil {
+		return nil, fmt.Errorf("failed to get bucket location: %w", err)
+	}
+	// S3 returns an empty LocationConstraint for buckets in us-east-1.
+	region := string(output.LocationConstraint)
+	if region == "" {
+		region = "us-east-1"
+	}
+	return &region, nil
+}
+
+func getBucketFromS3Path(p string) (*string, error) {
+	if !ValidateS3Path(p) {
+		return nil, fmt.Errorf("%s is not a valid s3 path", p)
+	}
+	pWithoutProtocol := strings.TrimPrefix(p, s3Prefix)
+	pParts := strings.Split(pWithoutProtocol, s3PathSeparator)
+	return &pParts[0], nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go
new file mode 100644
index 000000000..fe63fedad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go
@@ -0,0 +1,92 @@
+// Package arn provides a parser for interacting with Amazon Resource Names.
+package arn
+
+import (
+	"errors"
+	"strings"
+)
+
+const (
+	arnDelimiter = ":"
+	arnSections  = 6
+	arnPrefix    = "arn:"
+
+	// zero-indexed
+	sectionPartition = 1
+	sectionService   = 2
+	sectionRegion    = 3
+	sectionAccountID = 4
+	sectionResource  = 5
+
+	// errors
+	invalidPrefix   = "arn: invalid prefix"
+	invalidSections = "arn: not enough sections"
+)
+
+// ARN captures the individual fields of an Amazon Resource Name.
+// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information.
+type ARN struct {
+	// The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in
+	// other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China
+	// (Beijing) region is "aws-cn".
+	Partition string
+
+	// The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of
+	// namespaces, see
+	// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces.
+	Service string
+
+	// The region the resource resides in.
Note that the ARNs for some resources do not require a region, so this + // component might be omitted. + Region string + + // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the + // ARNs for some resources don't require an account number, so this component might be omitted. + AccountID string + + // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource — + // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the + // resource name itself. Some services allows paths for resource names, as described in + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. + Resource string +} + +// Parse parses an ARN into its constituent parts. +// +// Some example ARNs: +// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment +// arn:aws:iam::123456789012:user/David +// arn:aws:rds:eu-west-1:123456789012:db:mysql-db +// arn:aws:s3:::my_corporate_bucket/exampleobject.png +func Parse(arn string) (ARN, error) { + if !strings.HasPrefix(arn, arnPrefix) { + return ARN{}, errors.New(invalidPrefix) + } + sections := strings.SplitN(arn, arnDelimiter, arnSections) + if len(sections) != arnSections { + return ARN{}, errors.New(invalidSections) + } + return ARN{ + Partition: sections[sectionPartition], + Service: sections[sectionService], + Region: sections[sectionRegion], + AccountID: sections[sectionAccountID], + Resource: sections[sectionResource], + }, nil +} + +// IsARN returns whether the given string is an arn +// by looking for whether the string starts with arn: +func IsARN(arn string) bool { + return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 +} + +// String returns the canonical representation of the ARN +func (arn ARN) String() string { + return arnPrefix + + arn.Partition + arnDelimiter + + arn.Service + arnDelimiter + + arn.Region + arnDelimiter + + arn.AccountID + arnDelimiter + + arn.Resource +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md new file mode 100644 index 000000000..186c48892 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md @@ -0,0 +1,110 @@ +# v1.6.2 (2024-03-29) + +* No change notes available for this release. + +# v1.6.1 (2024-02-21) + +* No change notes available for this release. + +# v1.6.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. + +# v1.5.4 (2023-12-07) + +* No change notes available for this release. + +# v1.5.3 (2023-11-30) + +* No change notes available for this release. + +# v1.5.2 (2023-11-29) + +* No change notes available for this release. + +# v1.5.1 (2023-11-15) + +* No change notes available for this release. + +# v1.5.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). + +# v1.4.14 (2023-10-06) + +* No change notes available for this release. + +# v1.4.13 (2023-08-18) + +* No change notes available for this release. + +# v1.4.12 (2023-08-07) + +* No change notes available for this release. + +# v1.4.11 (2023-07-31) + +* No change notes available for this release. 
+ +# v1.4.10 (2022-12-02) + +* No change notes available for this release. + +# v1.4.9 (2022-10-24) + +* No change notes available for this release. + +# v1.4.8 (2022-09-14) + +* No change notes available for this release. + +# v1.4.7 (2022-09-02) + +* No change notes available for this release. + +# v1.4.6 (2022-08-31) + +* No change notes available for this release. + +# v1.4.5 (2022-08-29) + +* No change notes available for this release. + +# v1.4.4 (2022-08-09) + +* No change notes available for this release. + +# v1.4.3 (2022-06-29) + +* No change notes available for this release. + +# v1.4.2 (2022-06-07) + +* No change notes available for this release. + +# v1.4.1 (2022-03-24) + +* No change notes available for this release. + +# v1.4.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.3.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.2.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.1.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.0.0 (2021-11-06) + +* **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release. +* **Release**: Protocol support has been added for AWS event stream. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
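As an aside, the vendored arn package added above is self-contained and easy to exercise. A minimal usage sketch follows (a hypothetical standalone program, not part of this change; the example ARN is taken from the package's own doc comment):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/arn"
)

func main() {
	// Example ARN from the arn package documentation.
	a, err := arn.Parse("arn:aws:s3:::my_corporate_bucket/exampleobject.png")
	if err != nil {
		panic(err)
	}
	fmt.Println(a.Partition) // aws
	fmt.Println(a.Service)   // s3
	fmt.Println(a.Resource)  // my_corporate_bucket/exampleobject.png
	fmt.Println(a.String())  // round-trips to the original ARN string
	fmt.Println(arn.IsARN("not-an-arn")) // false
}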
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go new file mode 100644 index 000000000..151054971 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go @@ -0,0 +1,144 @@ +package eventstream + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" +) + +type decodedMessage struct { + rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + *hs = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return StringValue(string(v)), err + case timestampValueType: + v, err := val.(json.Number).Int64() + return 
TimestampValue(timeFromEpochMilli(v)), err + case uuidValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + var tv UUIDValue + copy(tv[:], v) + return tv, err + default: + panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go new file mode 100644 index 000000000..d9ab7652f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go @@ -0,0 +1,218 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "github.com/aws/smithy-go/logging" + "hash" + "hash/crc32" + "io" +) + +// DecoderOptions is the Decoder configuration options. +type DecoderOptions struct { + Logger logging.Logger + LogMessages bool +} + +// Decoder provides decoding of an Event Stream messages. +type Decoder struct { + options DecoderOptions +} + +// NewDecoder initializes and returns a Decoder for decoding event +// stream messages from the reader provided. +func NewDecoder(optFns ...func(*DecoderOptions)) *Decoder { + options := DecoderOptions{} + + for _, fn := range optFns { + fn(&options) + } + + return &Decoder{ + options: options, + } +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if decodeMessage fails to read +// the message from the stream. +// +// payloadBuf is a byte slice that will be used in the returned Message.Payload. Callers +// must ensure that the Message.Payload from a previous decode has been consumed before passing in the same underlying +// payloadBuf byte slice. +func (d *Decoder) Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { + if d.options.Logger != nil && d.options.LogMessages { + debugMsgBuf := bytes.NewBuffer(nil) + reader = io.TeeReader(reader, debugMsgBuf) + defer func() { + logMessageDecode(d.options.Logger, debugMsgBuf, m, err) + }() + } + + m, err = decodeMessage(reader, payloadBuf) + + return m, err +} + +// decodeMessage attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if decodeMessage fails to read +// the message from the reader. 
+func decodeMessage(reader io.Reader, payloadBuf []byte) (m Message, err error) { + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +func logMessageDecode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Logf(logging.Debug, w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "decodeMessage error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return v, err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return b[0], err +} + +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} + +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} + +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go new file mode 100644 index 000000000..f03ee4b93 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go @@ -0,0 +1,167 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "github.com/aws/smithy-go/logging" + "hash" + "hash/crc32" + "io" +) + +// EncoderOptions is the configuration options for Encoder. +type EncoderOptions struct { + Logger logging.Logger + LogMessages bool +} + +// Encoder provides EventStream message encoding. +type Encoder struct { + options EncoderOptions + + headersBuf *bytes.Buffer + messageBuf *bytes.Buffer +} + +// NewEncoder initializes and returns an Encoder to encode Event Stream +// messages. +func NewEncoder(optFns ...func(*EncoderOptions)) *Encoder { + o := EncoderOptions{} + + for _, fn := range optFns { + fn(&o) + } + + return &Encoder{ + options: o, + headersBuf: bytes.NewBuffer(nil), + messageBuf: bytes.NewBuffer(nil), + } +} + +// Encode encodes a single EventStream message to the io.Writer the Encoder +// was created with. An error is returned if writing the message fails. +func (e *Encoder) Encode(w io.Writer, msg Message) (err error) { + e.headersBuf.Reset() + e.messageBuf.Reset() + + var writer io.Writer = e.messageBuf + if e.options.Logger != nil && e.options.LogMessages { + encodeMsgBuf := bytes.NewBuffer(nil) + writer = io.MultiWriter(writer, encodeMsgBuf) + defer func() { + logMessageEncode(e.options.Logger, encodeMsgBuf, msg, err) + }() + } + + if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { + return err + } + + crc := crc32.New(crc32IEEETable) + hashWriter := io.MultiWriter(writer, crc) + + headersLen := uint32(e.headersBuf.Len()) + payloadLen := uint32(len(msg.Payload)) + + if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + return err + } + + if headersLen > 0 { + if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { + return err + } + } + + if payloadLen > 0 { + if _, err = hashWriter.Write(msg.Payload); err != nil { + return err + } + } + + msgCRC := crc.Sum32() + if err := binary.Write(writer, binary.BigEndian, msgCRC); err != nil { + return err + } + + _, err = io.Copy(w, e.messageBuf) + + return err +} + +func logMessageEncode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Logf(logging.Debug, w.String()) }() + + fmt.Fprintf(w, "Message to encode:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(msg); err != nil { + fmt.Fprintf(w, "Failed to get encoded message, %v\n", err) + } + + if encodeErr != nil { + fmt.Fprintf(w, "Encode error: %v\n", encodeErr) + return + } + + fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) +} + +func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { + p := messagePrelude{ + Length: minMsgLen + headersLen + payloadLen, + HeadersLen: headersLen, + } + if err := p.ValidateLens(); err != nil { + return err + } + + err := binaryWriteFields(w, binary.BigEndian, + p.Length, + p.HeadersLen, + ) + if err != nil { + return err + } + + p.PreludeCRC = crc.Sum32() + err = binary.Write(w, binary.BigEndian, p.PreludeCRC) + if err != nil { + return err + } + + return nil +} + +// EncodeHeaders writes the header values to the writer encoded in the event +// stream format. Returns an error if a header fails to encode. 
+func EncodeHeaders(w io.Writer, headers Headers) error { + for _, h := range headers { + hn := headerName{ + Len: uint8(len(h.Name)), + } + copy(hn.Name[:hn.Len], h.Name) + if err := hn.encode(w); err != nil { + return err + } + + if err := h.Value.encode(w); err != nil { + return err + } + } + + return nil +} + +func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { + for _, v := range vs { + if err := binary.Write(w, order, v); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go new file mode 100644 index 000000000..5481ef307 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go @@ -0,0 +1,23 @@ +package eventstream + +import "fmt" + +// LengthError provides the error for items being larger than a maximum length. +type LengthError struct { + Part string + Want int + Have int + Value interface{} +} + +func (e LengthError) Error() string { + return fmt.Sprintf("%s length invalid, %d/%d, %v", + e.Part, e.Want, e.Have, e.Value) +} + +// ChecksumError provides the error for message checksum invalidation errors. +type ChecksumError struct{} + +func (e ChecksumError) Error() string { + return "message checksum mismatch" +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go new file mode 100644 index 000000000..93ea71ffd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go @@ -0,0 +1,24 @@ +package eventstreamapi + +// EventStream headers with specific meaning to async API functionality. +const ( + ChunkSignatureHeader = `:chunk-signature` // chunk signature for message + DateHeader = `:date` // Date header for signature + ContentTypeHeader = ":content-type" // message payload content-type + + // Message header and values + MessageTypeHeader = `:message-type` // Identifies type of message. + EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". + + // Message Error + ErrorCodeHeader = `:error-code` + ErrorMessageHeader = `:error-message` + + // Message Exception + ExceptionTypeHeader = `:exception-type` +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go new file mode 100644 index 000000000..d07ff6b89 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go @@ -0,0 +1,71 @@ +package eventstreamapi + +import ( + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" +) + +type eventStreamWriterKey struct{} + +// GetInputStreamWriter returns EventTypeHeader io.PipeWriter used for the operation's input event stream. 
+func GetInputStreamWriter(ctx context.Context) io.WriteCloser { + writeCloser, _ := middleware.GetStackValue(ctx, eventStreamWriterKey{}).(io.WriteCloser) + return writeCloser +} + +func setInputStreamWriter(ctx context.Context, writeCloser io.WriteCloser) context.Context { + return middleware.WithStackValue(ctx, eventStreamWriterKey{}, writeCloser) +} + +// InitializeStreamWriter is a Finalize middleware initializes an in-memory pipe for sending event stream messages +// via the HTTP request body. +type InitializeStreamWriter struct{} + +// AddInitializeStreamWriter adds the InitializeStreamWriter middleware to the provided stack. +func AddInitializeStreamWriter(stack *middleware.Stack) error { + return stack.Finalize.Add(&InitializeStreamWriter{}, middleware.After) +} + +// ID returns the identifier for the middleware. +func (i *InitializeStreamWriter) ID() string { + return "InitializeStreamWriter" +} + +// HandleFinalize is the middleware implementation. +func (i *InitializeStreamWriter) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request) + } + + inputReader, inputWriter := io.Pipe() + defer func() { + if err == nil { + return + } + _ = inputReader.Close() + _ = inputWriter.Close() + }() + + request, err = request.SetStream(inputReader) + if err != nil { + return out, metadata, err + } + in.Request = request + + ctx = setInputStreamWriter(ctx, inputWriter) + + out, metadata, err = next.HandleFinalize(ctx, in) + if err != nil { + return out, metadata, err + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go new file mode 100644 index 000000000..cbf5a2862 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go @@ -0,0 +1,13 @@ +//go:build go1.18 +// +build go1.18 + +package eventstreamapi + +import smithyhttp "github.com/aws/smithy-go/transport/http" + +// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality. +// +// This operation is a no-op for Go 1.18 and above. +func ApplyHTTPTransportFixes(r *smithyhttp.Request) error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go new file mode 100644 index 000000000..7d10ec2eb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go @@ -0,0 +1,12 @@ +//go:build !go1.18 +// +build !go1.18 + +package eventstreamapi + +import smithyhttp "github.com/aws/smithy-go/transport/http" + +// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality. 
+func ApplyHTTPTransportFixes(r *smithyhttp.Request) error { + r.Header.Set("Expect", "100-continue") + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go new file mode 100644 index 000000000..a0479b9be --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package eventstream + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.6.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go new file mode 100644 index 000000000..f6f8c5674 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go @@ -0,0 +1,175 @@ +package eventstream + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Headers are a collection of EventStream header values. +type Headers []Header + +// Header is a single EventStream Key Value header pair. +type Header struct { + Name string + Value Value +} + +// Set associates the name with a value. If the header name already exists in +// the Headers the value will be replaced with the new one. +func (hs *Headers) Set(name string, value Value) { + var i int + for ; i < len(*hs); i++ { + if (*hs)[i].Name == name { + (*hs)[i].Value = value + return + } + } + + *hs = append(*hs, Header{ + Name: name, Value: value, + }) +} + +// Get returns the Value associated with the header. Nil is returned if the +// value does not exist. +func (hs Headers) Get(name string) Value { + for i := 0; i < len(hs); i++ { + if h := hs[i]; h.Name == name { + return h.Value + } + } + return nil +} + +// Del deletes the value in the Headers if it exists. 
+func (hs *Headers) Del(name string) { + for i := 0; i < len(*hs); i++ { + if (*hs)[i].Name == name { + copy((*hs)[i:], (*hs)[i+1:]) + (*hs) = (*hs)[:len(*hs)-1] + } + } +} + +// Clone returns a deep copy of the headers +func (hs Headers) Clone() Headers { + o := make(Headers, 0, len(hs)) + for _, h := range hs { + o.Set(h.Name, h.Value) + } + return o +} + +func decodeHeaders(r io.Reader) (Headers, error) { + hs := Headers{} + + for { + name, err := decodeHeaderName(r) + if err != nil { + if err == io.EOF { + // EOF while getting header name means no more headers + break + } + return nil, err + } + + value, err := decodeHeaderValue(r) + if err != nil { + return nil, err + } + + hs.Set(name, value) + } + + return hs, nil +} + +func decodeHeaderName(r io.Reader) (string, error) { + var n headerName + + var err error + n.Len, err = decodeUint8(r) + if err != nil { + return "", err + } + + name := n.Name[:n.Len] + if _, err := io.ReadFull(r, name); err != nil { + return "", err + } + + return string(name), nil +} + +func decodeHeaderValue(r io.Reader) (Value, error) { + var raw rawValue + + typ, err := decodeUint8(r) + if err != nil { + return nil, err + } + raw.Type = valueType(typ) + + var v Value + + switch raw.Type { + case trueValueType: + v = BoolValue(true) + case falseValueType: + v = BoolValue(false) + case int8ValueType: + var tv Int8Value + err = tv.decode(r) + v = tv + case int16ValueType: + var tv Int16Value + err = tv.decode(r) + v = tv + case int32ValueType: + var tv Int32Value + err = tv.decode(r) + v = tv + case int64ValueType: + var tv Int64Value + err = tv.decode(r) + v = tv + case bytesValueType: + var tv BytesValue + err = tv.decode(r) + v = tv + case stringValueType: + var tv StringValue + err = tv.decode(r) + v = tv + case timestampValueType: + var tv TimestampValue + err = tv.decode(r) + v = tv + case uuidValueType: + var tv UUIDValue + err = tv.decode(r) + v = tv + default: + panic(fmt.Sprintf("unknown value type %d", raw.Type)) + } + + // Error could be EOF, let caller deal with it + return v, err +} + +const maxHeaderNameLen = 255 + +type headerName struct { + Len uint8 + Name [maxHeaderNameLen]byte +} + +func (v headerName) encode(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { + return err + } + + _, err := w.Write(v.Name[:v.Len]) + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go new file mode 100644 index 000000000..423b6bb26 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go @@ -0,0 +1,521 @@ +package eventstream + +import ( + "encoding/base64" + "encoding/binary" + "encoding/hex" + "fmt" + "io" + "strconv" + "time" +) + +const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 + +// valueType is the EventStream header value type. 
+type valueType uint8 + +// Header value types +const ( + trueValueType valueType = iota + falseValueType + int8ValueType // Byte + int16ValueType // Short + int32ValueType // Integer + int64ValueType // Long + bytesValueType + stringValueType + timestampValueType + uuidValueType +) + +func (t valueType) String() string { + switch t { + case trueValueType: + return "bool" + case falseValueType: + return "bool" + case int8ValueType: + return "int8" + case int16ValueType: + return "int16" + case int32ValueType: + return "int32" + case int64ValueType: + return "int64" + case bytesValueType: + return "byte_array" + case stringValueType: + return "string" + case timestampValueType: + return "timestamp" + case uuidValueType: + return "uuid" + default: + return fmt.Sprintf("unknown value type %d", uint8(t)) + } +} + +type rawValue struct { + Type valueType + Len uint16 // Only set for variable length slices + Value []byte // byte representation of value, BigEndian encoding. +} + +func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { + return binaryWriteFields(w, binary.BigEndian, + r.Type, + v, + ) +} + +func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { + binary.Write(w, binary.BigEndian, r.Type) + + _, err := w.Write(v) + return err +} + +func (r rawValue) encodeBytes(w io.Writer, v []byte) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + _, err = w.Write(v) + return err +} + +func (r rawValue) encodeString(w io.Writer, v string) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + type stringWriter interface { + WriteString(string) (int, error) + } + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + if sw, ok := w.(stringWriter); ok { + _, err = sw.WriteString(v) + } else { + _, err = w.Write([]byte(v)) + } + + return err +} + +func decodeFixedBytesValue(r io.Reader, buf []byte) error { + _, err := io.ReadFull(r, buf) + return err +} + +func decodeBytesValue(r io.Reader) ([]byte, error) { + var raw rawValue + var err error + raw.Len, err = decodeUint16(r) + if err != nil { + return nil, err + } + + buf := make([]byte, raw.Len) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeStringValue(r io.Reader) (string, error) { + v, err := decodeBytesValue(r) + return string(v), err +} + +// Value represents the abstract header value. +type Value interface { + Get() interface{} + String() string + valueType() valueType + encode(io.Writer) error +} + +// An BoolValue provides eventstream encoding, and representation +// of a Go bool value. +type BoolValue bool + +// Get returns the underlying type +func (v BoolValue) Get() interface{} { + return bool(v) +} + +// valueType returns the EventStream header value type value. +func (v BoolValue) valueType() valueType { + if v { + return trueValueType + } + return falseValueType +} + +func (v BoolValue) String() string { + return strconv.FormatBool(bool(v)) +} + +// encode encodes the BoolValue into an eventstream binary value +// representation. 
+func (v BoolValue) encode(w io.Writer) error { + return binary.Write(w, binary.BigEndian, v.valueType()) +} + +// An Int8Value provides eventstream encoding, and representation of a Go +// int8 value. +type Int8Value int8 + +// Get returns the underlying value. +func (v Int8Value) Get() interface{} { + return int8(v) +} + +// valueType returns the EventStream header value type value. +func (Int8Value) valueType() valueType { + return int8ValueType +} + +func (v Int8Value) String() string { + return fmt.Sprintf("0x%02x", int8(v)) +} + +// encode encodes the Int8Value into an eventstream binary value +// representation. +func (v Int8Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeScalar(w, v) +} + +func (v *Int8Value) decode(r io.Reader) error { + n, err := decodeUint8(r) + if err != nil { + return err + } + + *v = Int8Value(n) + return nil +} + +// An Int16Value provides eventstream encoding, and representation of a Go +// int16 value. +type Int16Value int16 + +// Get returns the underlying value. +func (v Int16Value) Get() interface{} { + return int16(v) +} + +// valueType returns the EventStream header value type value. +func (Int16Value) valueType() valueType { + return int16ValueType +} + +func (v Int16Value) String() string { + return fmt.Sprintf("0x%04x", int16(v)) +} + +// encode encodes the Int16Value into an eventstream binary value +// representation. +func (v Int16Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int16Value) decode(r io.Reader) error { + n, err := decodeUint16(r) + if err != nil { + return err + } + + *v = Int16Value(n) + return nil +} + +// An Int32Value provides eventstream encoding, and representation of a Go +// int32 value. +type Int32Value int32 + +// Get returns the underlying value. +func (v Int32Value) Get() interface{} { + return int32(v) +} + +// valueType returns the EventStream header value type value. +func (Int32Value) valueType() valueType { + return int32ValueType +} + +func (v Int32Value) String() string { + return fmt.Sprintf("0x%08x", int32(v)) +} + +// encode encodes the Int32Value into an eventstream binary value +// representation. +func (v Int32Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int32Value) decode(r io.Reader) error { + n, err := decodeUint32(r) + if err != nil { + return err + } + + *v = Int32Value(n) + return nil +} + +// An Int64Value provides eventstream encoding, and representation of a Go +// int64 value. +type Int64Value int64 + +// Get returns the underlying value. +func (v Int64Value) Get() interface{} { + return int64(v) +} + +// valueType returns the EventStream header value type value. +func (Int64Value) valueType() valueType { + return int64ValueType +} + +func (v Int64Value) String() string { + return fmt.Sprintf("0x%016x", int64(v)) +} + +// encode encodes the Int64Value into an eventstream binary value +// representation. +func (v Int64Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int64Value) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = Int64Value(n) + return nil +} + +// An BytesValue provides eventstream encoding, and representation of a Go +// byte slice. +type BytesValue []byte + +// Get returns the underlying value. 
+func (v BytesValue) Get() interface{} { + return []byte(v) +} + +// valueType returns the EventStream header value type value. +func (BytesValue) valueType() valueType { + return bytesValueType +} + +func (v BytesValue) String() string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +// encode encodes the BytesValue into an eventstream binary value +// representation. +func (v BytesValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeBytes(w, []byte(v)) +} + +func (v *BytesValue) decode(r io.Reader) error { + buf, err := decodeBytesValue(r) + if err != nil { + return err + } + + *v = BytesValue(buf) + return nil +} + +// An StringValue provides eventstream encoding, and representation of a Go +// string. +type StringValue string + +// Get returns the underlying value. +func (v StringValue) Get() interface{} { + return string(v) +} + +// valueType returns the EventStream header value type value. +func (StringValue) valueType() valueType { + return stringValueType +} + +func (v StringValue) String() string { + return string(v) +} + +// encode encodes the StringValue into an eventstream binary value +// representation. +func (v StringValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeString(w, string(v)) +} + +func (v *StringValue) decode(r io.Reader) error { + s, err := decodeStringValue(r) + if err != nil { + return err + } + + *v = StringValue(s) + return nil +} + +// An TimestampValue provides eventstream encoding, and representation of a Go +// timestamp. +type TimestampValue time.Time + +// Get returns the underlying value. +func (v TimestampValue) Get() interface{} { + return time.Time(v) +} + +// valueType returns the EventStream header value type value. +func (TimestampValue) valueType() valueType { + return timestampValueType +} + +func (v TimestampValue) epochMilli() int64 { + nano := time.Time(v).UnixNano() + msec := nano / int64(time.Millisecond) + return msec +} + +func (v TimestampValue) String() string { + msec := v.epochMilli() + return strconv.FormatInt(msec, 10) +} + +// encode encodes the TimestampValue into an eventstream binary value +// representation. +func (v TimestampValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + msec := v.epochMilli() + return raw.encodeScalar(w, msec) +} + +func (v *TimestampValue) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = TimestampValue(timeFromEpochMilli(int64(n))) + return nil +} + +// MarshalJSON implements the json.Marshaler interface +func (v TimestampValue) MarshalJSON() ([]byte, error) { + return []byte(v.String()), nil +} + +func timeFromEpochMilli(t int64) time.Time { + secs := t / 1e3 + msec := t % 1e3 + return time.Unix(secs, msec*int64(time.Millisecond)).UTC() +} + +// An UUIDValue provides eventstream encoding, and representation of a UUID +// value. +type UUIDValue [16]byte + +// Get returns the underlying value. +func (v UUIDValue) Get() interface{} { + return v[:] +} + +// valueType returns the EventStream header value type value. 
+func (UUIDValue) valueType() valueType { + return uuidValueType +} + +func (v UUIDValue) String() string { + var scratch [36]byte + + const dash = '-' + + hex.Encode(scratch[:8], v[0:4]) + scratch[8] = dash + hex.Encode(scratch[9:13], v[4:6]) + scratch[13] = dash + hex.Encode(scratch[14:18], v[6:8]) + scratch[18] = dash + hex.Encode(scratch[19:23], v[8:10]) + scratch[23] = dash + hex.Encode(scratch[24:], v[10:]) + + return string(scratch[:]) +} + +// encode encodes the UUIDValue into an eventstream binary value +// representation. +func (v UUIDValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeFixedSlice(w, v[:]) +} + +func (v *UUIDValue) decode(r io.Reader) error { + tv := (*v)[:] + return decodeFixedBytesValue(r, tv) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go new file mode 100644 index 000000000..f7427da03 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go @@ -0,0 +1,117 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +const preludeLen = 8 +const preludeCRCLen = 4 +const msgCRCLen = 4 +const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen +const maxPayloadLen = 1024 * 1024 * 16 // 16MB +const maxHeadersLen = 1024 * 128 // 128KB +const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen + +var crc32IEEETable = crc32.MakeTable(crc32.IEEE) + +// A Message provides the eventstream message representation. +type Message struct { + Headers Headers + Payload []byte +} + +func (m *Message) rawMessage() (rawMessage, error) { + var raw rawMessage + + if len(m.Headers) > 0 { + var headers bytes.Buffer + if err := EncodeHeaders(&headers, m.Headers); err != nil { + return rawMessage{}, err + } + raw.Headers = headers.Bytes() + raw.HeadersLen = uint32(len(raw.Headers)) + } + + raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen + + hash := crc32.New(crc32IEEETable) + binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) + raw.PreludeCRC = hash.Sum32() + + binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) + + if raw.HeadersLen > 0 { + hash.Write(raw.Headers) + } + + // Read payload bytes and update hash for it as well. + if len(m.Payload) > 0 { + raw.Payload = m.Payload + hash.Write(raw.Payload) + } + + raw.CRC = hash.Sum32() + + return raw, nil +} + +// Clone returns a deep copy of the message. 
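+// The header set is cloned and the payload is copied into a fresh slice, so
+// retaining or mutating the clone should not alias the original message's
+// buffers.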
+func (m Message) Clone() Message { + var payload []byte + if m.Payload != nil { + payload = make([]byte, len(m.Payload)) + copy(payload, m.Payload) + } + + return Message{ + Headers: m.Headers.Clone(), + Payload: payload, + } +} + +type messagePrelude struct { + Length uint32 + HeadersLen uint32 + PreludeCRC uint32 +} + +func (p messagePrelude) PayloadLen() uint32 { + return p.Length - p.HeadersLen - minMsgLen +} + +func (p messagePrelude) ValidateLens() error { + if p.Length == 0 || p.Length > maxMsgLen { + return LengthError{ + Part: "message prelude", + Want: maxMsgLen, + Have: int(p.Length), + } + } + if p.HeadersLen > maxHeadersLen { + return LengthError{ + Part: "message headers", + Want: maxHeadersLen, + Have: int(p.HeadersLen), + } + } + if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { + return LengthError{ + Part: "message payload", + Want: maxPayloadLen, + Have: int(payloadLen), + } + } + + return nil +} + +type rawMessage struct { + messagePrelude + + Headers []byte + Payload []byte + + CRC uint32 +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md new file mode 100644 index 000000000..0f10e0228 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md @@ -0,0 +1,217 @@ +# v1.3.5 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.4 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.10 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.9 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.8 (2023-12-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.7 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.6 (2023-11-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.5 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.4 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.6 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.5 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.4 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.3 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.2 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.28 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.27 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.26 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.25 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.24 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.23 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.22 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.21 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.20 (2023-02-14) + +* No change notes available for this release. + +# v1.0.19 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.18 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.17 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.16 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.15 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.14 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.13 (2022-09-14) + +* **Bug Fix**: Fixes an issues where an error from an underlying SigV4 credential provider would not be surfaced from the SigV4a credential provider. Contribution by [sakthipriyan-aqfer](https://github.com/sakthipriyan-aqfer). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.12 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.11 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.10 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.9 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.8 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.7 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.6 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.5 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.4 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.3 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.2 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.1 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2022-04-07) + +* **Release**: New internal v4a signing module location. + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go new file mode 100644 index 000000000..3ae3a019e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go @@ -0,0 +1,141 @@ +package v4a + +import ( + "context" + "crypto/ecdsa" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/sdk" +) + +// Credentials is Context, ECDSA, and Optional Session Token that can be used +// to sign requests using SigV4a +type Credentials struct { + Context string + PrivateKey *ecdsa.PrivateKey + SessionToken string + + // Time the credentials will expire. + CanExpire bool + Expires time.Time +} + +// Expired returns if the credentials have expired. +func (v Credentials) Expired() bool { + if v.CanExpire { + return !v.Expires.After(sdk.NowTime()) + } + + return false +} + +// HasKeys returns if the credentials keys are set. 
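+// That is, it reports whether Context is non-empty and PrivateKey is non-nil.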
+func (v Credentials) HasKeys() bool {
+	return len(v.Context) > 0 && v.PrivateKey != nil
+}
+
+// SymmetricCredentialAdaptor wraps a SigV4 AccessKey/SecretKey provider and adapts the credentials
+// to an ECDSA PrivateKey for signing with SigV4a
+type SymmetricCredentialAdaptor struct {
+	SymmetricProvider aws.CredentialsProvider
+
+	asymmetric atomic.Value
+	m          sync.Mutex
+}
+
+// Retrieve retrieves symmetric credentials from the underlying provider.
+func (s *SymmetricCredentialAdaptor) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	symCreds, err := s.retrieveFromSymmetricProvider(ctx)
+	if err != nil {
+		return aws.Credentials{}, err
+	}
+
+	if asymCreds := s.getCreds(); asymCreds == nil {
+		return symCreds, nil
+	}
+
+	s.m.Lock()
+	defer s.m.Unlock()
+
+	asymCreds := s.getCreds()
+	if asymCreds == nil {
+		return symCreds, nil
+	}
+
+	// if the context does not match the access key id, clear it
+	if asymCreds.Context != symCreds.AccessKeyID {
+		s.asymmetric.Store((*Credentials)(nil))
+	}
+
+	return symCreds, nil
+}
+
+// RetrievePrivateKey returns credentials suitable for SigV4a signing
+func (s *SymmetricCredentialAdaptor) RetrievePrivateKey(ctx context.Context) (Credentials, error) {
+	if asymCreds := s.getCreds(); asymCreds != nil {
+		return *asymCreds, nil
+	}
+
+	s.m.Lock()
+	defer s.m.Unlock()
+
+	if asymCreds := s.getCreds(); asymCreds != nil {
+		return *asymCreds, nil
+	}
+
+	symmetricCreds, err := s.retrieveFromSymmetricProvider(ctx)
+	if err != nil {
+		return Credentials{}, fmt.Errorf("failed to retrieve symmetric credentials: %v", err)
+	}
+
+	privateKey, err := deriveKeyFromAccessKeyPair(symmetricCreds.AccessKeyID, symmetricCreds.SecretAccessKey)
+	if err != nil {
+		return Credentials{}, fmt.Errorf("failed to derive asymmetric key from credentials")
+	}
+
+	creds := Credentials{
+		Context:      symmetricCreds.AccessKeyID,
+		PrivateKey:   privateKey,
+		SessionToken: symmetricCreds.SessionToken,
+		CanExpire:    symmetricCreds.CanExpire,
+		Expires:      symmetricCreds.Expires,
+	}
+
+	s.asymmetric.Store(&creds)
+
+	return creds, nil
+}
+
+func (s *SymmetricCredentialAdaptor) getCreds() *Credentials {
+	v := s.asymmetric.Load()
+
+	if v == nil {
+		return nil
+	}
+
+	c := v.(*Credentials)
+	if c != nil && c.HasKeys() && !c.Expired() {
+		return c
+	}
+
+	return nil
+}
+
+func (s *SymmetricCredentialAdaptor) retrieveFromSymmetricProvider(ctx context.Context) (aws.Credentials, error) {
+	credentials, err := s.SymmetricProvider.Retrieve(ctx)
+	if err != nil {
+		return aws.Credentials{}, err
+	}
+
+	return credentials, nil
+}
+
+// CredentialsProvider is the interface for a provider to retrieve credentials
+// to sign requests with.
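+//
+// SymmetricCredentialAdaptor satisfies this interface. A minimal usage
+// sketch (assuming cfg is an aws.Config loaded elsewhere):
+//
+//	adaptor := &SymmetricCredentialAdaptor{SymmetricProvider: cfg.Credentials}
+//	creds, err := adaptor.RetrievePrivateKey(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = creds.PrivateKey // derived NIST P-256 key used for SigV4a signing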
+type CredentialsProvider interface {
+	RetrievePrivateKey(context.Context) (Credentials, error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go
new file mode 100644
index 000000000..380d17427
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go
@@ -0,0 +1,17 @@
+package v4a
+
+import "fmt"
+
+// SigningError indicates an error condition occurred while performing SigV4a signing
+type SigningError struct {
+	Err error
+}
+
+func (e *SigningError) Error() string {
+	return fmt.Sprintf("failed to sign request: %v", e.Err)
+}
+
+// Unwrap returns the underlying error cause
+func (e *SigningError) Unwrap() error {
+	return e.Err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
new file mode 100644
index 000000000..51aa32cf7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package v4a
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.3.5"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go
new file mode 100644
index 000000000..1d0f25f8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go
@@ -0,0 +1,30 @@
+package crypto
+
+import "fmt"
+
+// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison
+// of the two byte slices, assuming they represent a big-endian number.
+//
+// error if len(x) != len(y)
+// -1 if x < y
+// 0 if x == y
+// +1 if x > y
+func ConstantTimeByteCompare(x, y []byte) (int, error) {
+	if len(x) != len(y) {
+		return 0, fmt.Errorf("slice lengths do not match")
+	}
+
+	xLarger, yLarger := 0, 0
+
+	for i := 0; i < len(x); i++ {
+		xByte, yByte := int(x[i]), int(y[i])
+
+		x := ((yByte - xByte) >> 8) & 1
+		y := ((xByte - yByte) >> 8) & 1
+
+		xLarger |= x &^ yLarger
+		yLarger |= y &^ xLarger
+	}
+
+	return xLarger - yLarger, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go
new file mode 100644
index 000000000..758c73fcb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go
@@ -0,0 +1,113 @@
+package crypto
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/hmac"
+	"encoding/asn1"
+	"encoding/binary"
+	"fmt"
+	"hash"
+	"math"
+	"math/big"
+)
+
+type ecdsaSignature struct {
+	R, S *big.Int
+}
+
+// ECDSAKey takes the given elliptic curve, and private key (d) byte slice
+// and returns the private ECDSA key.
+func ECDSAKey(curve elliptic.Curve, d []byte) *ecdsa.PrivateKey {
+	return ECDSAKeyFromPoint(curve, (&big.Int{}).SetBytes(d))
+}
+
+// ECDSAKeyFromPoint takes the given elliptic curve and point and returns the
+// private and public keypair
+func ECDSAKeyFromPoint(curve elliptic.Curve, d *big.Int) *ecdsa.PrivateKey {
+	pX, pY := curve.ScalarBaseMult(d.Bytes())
+
+	privKey := &ecdsa.PrivateKey{
+		PublicKey: ecdsa.PublicKey{
+			Curve: curve,
+			X:     pX,
+			Y:     pY,
+		},
+		D: d,
+	}
+
+	return privKey
+}
+
+// ECDSAPublicKey takes the provided curve and (x, y) coordinates and returns
+// *ecdsa.PublicKey. Returns an error if the given points are not on the curve.
+func ECDSAPublicKey(curve elliptic.Curve, x, y []byte) (*ecdsa.PublicKey, error) {
+	xPoint := (&big.Int{}).SetBytes(x)
+	yPoint := (&big.Int{}).SetBytes(y)
+
+	if !curve.IsOnCurve(xPoint, yPoint) {
+		return nil, fmt.Errorf("point(%v, %v) is not on the given curve", xPoint.String(), yPoint.String())
+	}
+
+	return &ecdsa.PublicKey{
+		Curve: curve,
+		X:     xPoint,
+		Y:     yPoint,
+	}, nil
+}
+
+// VerifySignature takes the provided public key, hash, and asn1 encoded signature and returns
+// whether the given signature is valid.
+func VerifySignature(key *ecdsa.PublicKey, hash []byte, signature []byte) (bool, error) {
+	var ecdsaSignature ecdsaSignature
+
+	_, err := asn1.Unmarshal(signature, &ecdsaSignature)
+	if err != nil {
+		return false, err
+	}
+
+	return ecdsa.Verify(key, hash, ecdsaSignature.R, ecdsaSignature.S), nil
+}
+
+// HMACKeyDerivation provides an implementation of a NIST-800-108 KDF (Key Derivation Function) in Counter Mode.
+// For the purposes of this implementation, HMAC is used as the PRF (Pseudorandom function), where the value of
+// `r` is defined as a 4 byte counter.
+func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, context []byte) ([]byte, error) {
+	// verify that we won't overflow the counter
+	n := int64(math.Ceil((float64(bitLen) / 8) / float64(hash().Size())))
+	if n > 0x7FFFFFFF {
+		return nil, fmt.Errorf("unable to derive key of size %d using 32-bit counter", bitLen)
+	}
+
+	// verify the requested bit length is not larger than the length encoding size
+	if int64(bitLen) > 0x7FFFFFFF {
+		return nil, fmt.Errorf("bitLen is greater than 32-bits")
+	}
+
+	fixedInput := bytes.NewBuffer(nil)
+	fixedInput.Write(label)
+	fixedInput.WriteByte(0x00)
+	fixedInput.Write(context)
+	if err := binary.Write(fixedInput, binary.BigEndian, int32(bitLen)); err != nil {
+		return nil, fmt.Errorf("failed to write bit length to fixed input string: %v", err)
+	}
+
+	var output []byte
+
+	h := hmac.New(hash, key)
+
+	for i := int64(1); i <= n; i++ {
+		h.Reset()
+		if err := binary.Write(h, binary.BigEndian, int32(i)); err != nil {
+			return nil, err
+		}
+		_, err := h.Write(fixedInput.Bytes())
+		if err != nil {
+			return nil, err
+		}
+		output = append(output, h.Sum(nil)...)
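+		// output now holds i PRF blocks, each HMAC(key, counter || label || 0x00 || context || bitLen),
+		// following the NIST SP 800-108 counter-mode construction.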
+	}
+
+	return output[:bitLen/8], nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go
new file mode 100644
index 000000000..89a76e2ea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go
@@ -0,0 +1,36 @@
+package v4
+
+const (
+	// EmptyStringSHA256 is the hex encoded sha256 value of an empty string
+	EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+
+	// UnsignedPayload indicates that the request payload body is unsigned
+	UnsignedPayload = "UNSIGNED-PAYLOAD"
+
+	// AmzAlgorithmKey indicates the signing algorithm
+	AmzAlgorithmKey = "X-Amz-Algorithm"
+
+	// AmzSecurityTokenKey indicates the security token to be used with temporary credentials
+	AmzSecurityTokenKey = "X-Amz-Security-Token"
+
+	// AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'
+	AmzDateKey = "X-Amz-Date"
+
+	// AmzCredentialKey is the access key ID and credential scope
+	AmzCredentialKey = "X-Amz-Credential"
+
+	// AmzSignedHeadersKey is the set of headers signed for the request
+	AmzSignedHeadersKey = "X-Amz-SignedHeaders"
+
+	// AmzSignatureKey is the query parameter to store the SigV4 signature
+	AmzSignatureKey = "X-Amz-Signature"
+
+	// TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
+	TimeFormat = "20060102T150405Z"
+
+	// ShortTimeFormat is the shortened time format used in the credential scope
+	ShortTimeFormat = "20060102"
+
+	// ContentSHAKey is the SHA256 of request body
+	ContentSHAKey = "X-Amz-Content-Sha256"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go
new file mode 100644
index 000000000..a15177e8f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+	sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings"
+)
+
+// Rules houses a set of Rule needed for validation of a
+// string value
+type Rules []Rule
+
+// Rule interface allows for more flexible rules and simply
+// checks whether or not a value adheres to that Rule
+type Rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and see if any rules
+// apply to the value; supports nested rules
+func (r Rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// MapRule generic Rule for maps
+type MapRule map[string]struct{}
+
+// IsValid for the map Rule checks whether the value exists in the map
+func (m MapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// AllowList is a generic Rule for whitelisting
+type AllowList struct {
+	Rule
+}
+
+// IsValid for AllowList checks if the value is within the AllowList
+func (w AllowList) IsValid(value string) bool {
+	return w.Rule.IsValid(value)
+}
+
+// DenyList is a generic Rule for blacklisting
+type DenyList struct {
+	Rule
+}
+
+// IsValid for DenyList checks that the value is not within the DenyList
+func (b DenyList) IsValid(value string) bool {
+	return !b.Rule.IsValid(value)
+}
+
+// Patterns is a list of strings to match against
+type Patterns []string
+
+// IsValid for Patterns checks each pattern and returns if a match has
+// been found
+func (p Patterns) IsValid(value string) bool {
+	for _, pattern := range p {
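+		// HasPrefixFold performs a case-insensitive prefix match.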
+		if sdkstrings.HasPrefixFold(value, pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// InclusiveRules allows rules to depend on one another
+type InclusiveRules []Rule
+
+// IsValid will return true if all rules are true
+func (r InclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go
new file mode 100644
index 000000000..3487dc335
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go
@@ -0,0 +1,67 @@
+package v4
+
+// IgnoredHeaders is a list of headers that are ignored during signing
+var IgnoredHeaders = Rules{
+	DenyList{
+		MapRule{
+			"Authorization":   struct{}{},
+			"User-Agent":      struct{}{},
+			"X-Amzn-Trace-Id": struct{}{},
+		},
+	},
+}
+
+// RequiredSignedHeaders is a whitelist for Build canonical headers.
+var RequiredSignedHeaders = Rules{
+	AllowList{
+		MapRule{
+			"Cache-Control":                         struct{}{},
+			"Content-Disposition":                   struct{}{},
+			"Content-Encoding":                      struct{}{},
+			"Content-Language":                      struct{}{},
+			"Content-Md5":                           struct{}{},
+			"Content-Type":                          struct{}{},
+			"Expires":                               struct{}{},
+			"If-Match":                              struct{}{},
+			"If-Modified-Since":                     struct{}{},
+			"If-None-Match":                         struct{}{},
+			"If-Unmodified-Since":                   struct{}{},
+			"Range":                                 struct{}{},
+			"X-Amz-Acl":                             struct{}{},
+			"X-Amz-Copy-Source":                     struct{}{},
+			"X-Amz-Copy-Source-If-Match":            struct{}{},
+			"X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
+			"X-Amz-Copy-Source-If-None-Match":       struct{}{},
+			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+			"X-Amz-Copy-Source-Range":               struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
+			"X-Amz-Grant-Full-control":                                    struct{}{},
+			"X-Amz-Grant-Read":                                            struct{}{},
+			"X-Amz-Grant-Read-Acp":                                        struct{}{},
+			"X-Amz-Grant-Write":                                           struct{}{},
+			"X-Amz-Grant-Write-Acp":                                       struct{}{},
+			"X-Amz-Metadata-Directive":                                    struct{}{},
+			"X-Amz-Mfa":                                                   struct{}{},
+			"X-Amz-Request-Payer":                                         struct{}{},
+			"X-Amz-Server-Side-Encryption":                                struct{}{},
+			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":                 struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Algorithm":             struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key":                   struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":               struct{}{},
+			"X-Amz-Storage-Class":                                         struct{}{},
+			"X-Amz-Website-Redirect-Location":                             struct{}{},
+			"X-Amz-Content-Sha256":                                        struct{}{},
+			"X-Amz-Tagging":                                               struct{}{},
+		},
+	},
+	Patterns{"X-Amz-Meta-"},
+}
+
+// AllowedQueryHoisting is a whitelist for Build query headers. Headers not in
+// RequiredSignedHeaders that match the X-Amz- pattern may be hoisted to the
+// query string.
+var AllowedQueryHoisting = InclusiveRules{
+	DenyList{RequiredSignedHeaders},
+	Patterns{"X-Amz-"},
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go
new file mode 100644
index 000000000..e7fa7a1b1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go
@@ -0,0 +1,13 @@
+package v4
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+)
+
+// HMACSHA256 computes an HMAC-SHA256 of data given the provided key.
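+//
+// For illustration only, the SigV4 signing key is derived by chaining calls
+// like the following (placeholder values, not a real secret or scope):
+//
+//	kDate := HMACSHA256([]byte("AWS4"+secretKey), []byte("20060102"))
+//	kRegion := HMACSHA256(kDate, []byte("us-east-1"))
+//	kService := HMACSHA256(kRegion, []byte("s3"))
+//	kSigning := HMACSHA256(kService, []byte("aws4_request"))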
+func HMACSHA256(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go
new file mode 100644
index 000000000..bf93659a4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go
@@ -0,0 +1,75 @@
+package v4
+
+import (
+	"net/http"
+	"strings"
+)
+
+// SanitizeHostForHeader removes the default port from the host and updates request.Host
+func SanitizeHostForHeader(r *http.Request) {
+	host := getHost(r)
+	port := portOnly(host)
+	if port != "" && isDefaultPort(r.URL.Scheme, port) {
+		r.Host = stripPort(host)
+	}
+}
+
+// Returns host from request
+func getHost(r *http.Request) string {
+	if r.Host != "" {
+		return r.Host
+	}
+
+	return r.URL.Host
+}
+
+// Hostname returns u.Host, without any port number.
+//
+// If Host is an IPv6 literal with a port number, Hostname returns the
+// IPv6 literal without the square brackets. IPv6 literals may include
+// a zone identifier.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return hostport
+	}
+	if i := strings.IndexByte(hostport, ']'); i != -1 {
+		return strings.TrimPrefix(hostport[:i], "[")
+	}
+	return hostport[:colon]
+}
+
+// Port returns the port part of u.Host, without the leading colon.
+// If u.Host doesn't contain a port, Port returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return ""
+	}
+	if i := strings.Index(hostport, "]:"); i != -1 {
+		return hostport[i+len("]:"):]
+	}
+	if strings.Contains(hostport, "]") {
+		return ""
+	}
+	return hostport[colon+len(":"):]
+}
+
+// Returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+	if port == "" {
+		return true
+	}
+
+	lowerCaseScheme := strings.ToLower(scheme)
+	if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go
new file mode 100644
index 000000000..1de06a765
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go
@@ -0,0 +1,36 @@
+package v4
+
+import "time"
+
+// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing.
+type SigningTime struct {
+	time.Time
+	timeFormat      string
+	shortTimeFormat string
+}
+
+// NewSigningTime creates a new SigningTime given a time.Time
+func NewSigningTime(t time.Time) SigningTime {
+	return SigningTime{
+		Time: t,
+	}
+}
+
+// TimeFormat provides a time formatted in the X-Amz-Date format.
+func (m *SigningTime) TimeFormat() string {
+	return m.format(&m.timeFormat, TimeFormat)
+}
+
+// ShortTimeFormat provides a time formatted in the short format 20060102, as
+// used in the credential scope.
+func (m *SigningTime) ShortTimeFormat() string {
+	return m.format(&m.shortTimeFormat, ShortTimeFormat)
+}
+
+func (m *SigningTime) format(target *string, format string) string {
+	if len(*target) > 0 {
+		return *target
+	}
+	v := m.Time.Format(format)
+	*target = v
+	return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go
new file mode 100644
index 000000000..741019b5f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go
@@ -0,0 +1,64 @@
+package v4
+
+import (
+	"net/url"
+	"strings"
+)
+
+const doubleSpace = "  "
+
+// StripExcessSpaces rewrites the passed-in string so it does not
+// contain multiple side-by-side spaces.
+func StripExcessSpaces(str string) string {
+	var j, k, l, m, spaces int
+	// Trim trailing spaces
+	for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+	}
+
+	// Trim leading spaces
+	for k = 0; k < j && str[k] == ' '; k++ {
+	}
+	str = str[k : j+1]
+
+	// Strip multiple spaces.
+	j = strings.Index(str, doubleSpace)
+	if j < 0 {
+		return str
+	}
+
+	buf := []byte(str)
+	for k, m, l = j, j, len(buf); k < l; k++ {
+		if buf[k] == ' ' {
+			if spaces == 0 {
+				// First space.
+				buf[m] = buf[k]
+				m++
+			}
+			spaces++
+		} else {
+			// End of multiple spaces.
+			spaces = 0
+			buf[m] = buf[k]
+			m++
+		}
+	}
+
+	return string(buf[:m])
+}
+
+// GetURIPath returns the escaped URI component from the provided URL
+func GetURIPath(u *url.URL) string {
+	var uri string
+
+	if len(u.Opaque) > 0 {
+		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+	} else {
+		uri = u.EscapedPath()
+	}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go
new file mode 100644
index 000000000..64b8b4e33
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go
@@ -0,0 +1,118 @@
+package v4a
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net/http"
+	"time"
+)
+
+// HTTPSigner is a SigV4a HTTP signer implementation
+type HTTPSigner interface {
+	SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optfns ...func(*SignerOptions)) error
+}
+
+// SignHTTPRequestMiddlewareOptions is the middleware options for constructing a SignHTTPRequestMiddleware.
+type SignHTTPRequestMiddlewareOptions struct {
+	Credentials CredentialsProvider
+	Signer      HTTPSigner
+	LogSigning  bool
+}
+
+// SignHTTPRequestMiddleware is a middleware for signing an HTTP request using SigV4a.
+type SignHTTPRequestMiddleware struct {
+	credentials CredentialsProvider
+	signer      HTTPSigner
+	logSigning  bool
+}
+
+// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given SignHTTPRequestMiddlewareOptions.
+func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware {
+	return &SignHTTPRequestMiddleware{
+		credentials: options.Credentials,
+		signer:      options.Signer,
+		logSigning:  options.LogSigning,
+	}
+}
+
+// ID returns the middleware identifier.
+func (s *SignHTTPRequestMiddleware) ID() string {
+	return "Signing"
+}
+
+// HandleFinalize signs an HTTP request using SigV4a.
+func (s *SignHTTPRequestMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if !hasCredentialProvider(s.credentials) {
+		return next.HandleFinalize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unexpected request middleware type %T", in.Request)
+	}
+
+	signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx)
+	payloadHash := v4.GetPayloadHash(ctx)
+	if len(payloadHash) == 0 {
+		return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")}
+	}
+
+	credentials, err := s.credentials.RetrievePrivateKey(ctx)
+	if err != nil {
+		return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)}
+	}
+
+	signerOptions := []func(o *SignerOptions){
+		func(o *SignerOptions) {
+			o.Logger = middleware.GetLogger(ctx)
+			o.LogSigning = s.logSigning
+		},
+	}
+
+	// existing DisableURIPathEscaping is equivalent in purpose
+	// to authentication scheme property DisableDoubleEncoding
+	disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx)
+	if overridden {
+		signerOptions = append(signerOptions, func(o *SignerOptions) {
+			o.DisableURIPathEscaping = disableDoubleEncoding
+		})
+	}
+
+	err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, []string{signingRegion}, time.Now().UTC(), signerOptions...)
+	if err != nil {
+		return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)}
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
+
+func hasCredentialProvider(p CredentialsProvider) bool {
+	if p == nil {
+		return false
+	}
+
+	return true
+}
+
+// RegisterSigningMiddleware registers the SigV4a signing middleware to the stack. If a signing middleware is already
+// present, this provided middleware will be swapped. Otherwise the middleware will be added at the tail of the
+// finalize step.
+func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignHTTPRequestMiddleware) (err error) {
+	const signedID = "Signing"
+	_, present := stack.Finalize.Get(signedID)
+	if present {
+		_, err = stack.Finalize.Swap(signedID, signingMiddleware)
+	} else {
+		err = stack.Finalize.Add(signingMiddleware, middleware.After)
+	}
+	return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go
new file mode 100644
index 000000000..951fc415d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go
@@ -0,0 +1,117 @@
+package v4a
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/middleware"
+	smithyHTTP "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPPresigner is an interface to a SigV4a signer that can create a
+// presigned URL for an HTTP request.
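+//
+// The Signer type in this package satisfies HTTPPresigner via its
+// PresignHTTP method.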
+type HTTPPresigner interface {
+	PresignHTTP(
+		ctx context.Context, credentials Credentials, r *http.Request,
+		payloadHash string, service string, regionSet []string, signingTime time.Time,
+		optFns ...func(*SignerOptions),
+	) (url string, signedHeader http.Header, err error)
+}
+
+// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware.
+type PresignHTTPRequestMiddlewareOptions struct {
+	CredentialsProvider CredentialsProvider
+	Presigner           HTTPPresigner
+	LogSigning          bool
+}
+
+// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a
+// presigned URL for an HTTP request.
+//
+// Will short circuit the middleware stack and not forward onto the next
+// Finalize handler.
+type PresignHTTPRequestMiddleware struct {
+	credentialsProvider CredentialsProvider
+	presigner           HTTPPresigner
+	logSigning          bool
+}
+
+// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware
+// initialized with the presigner.
+func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware {
+	return &PresignHTTPRequestMiddleware{
+		credentialsProvider: options.CredentialsProvider,
+		presigner:           options.Presigner,
+		logSigning:          options.LogSigning,
+	}
+}
+
+// ID provides the middleware ID.
+func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" }
+
+// HandleFinalize will take the provided input and create a presigned URL for
+// the HTTP request using the SigV4a presign authentication scheme.
+func (s *PresignHTTPRequestMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyHTTP.Request)
+	if !ok {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("unexpected request middleware type %T", in.Request),
+		}
+	}
+
+	httpReq := req.Build(ctx)
+	if !hasCredentialProvider(s.credentialsProvider) {
+		out.Result = &v4.PresignedHTTPRequest{
+			URL:          httpReq.URL.String(),
+			Method:       httpReq.Method,
+			SignedHeader: http.Header{},
+		}
+
+		return out, metadata, nil
+	}
+
+	signingName := awsmiddleware.GetSigningName(ctx)
+	signingRegion := awsmiddleware.GetSigningRegion(ctx)
+	payloadHash := v4.GetPayloadHash(ctx)
+	if len(payloadHash) == 0 {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("computed payload hash missing from context"),
+		}
+	}
+
+	credentials, err := s.credentialsProvider.RetrievePrivateKey(ctx)
+	if err != nil {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("failed to retrieve credentials: %w", err),
+		}
+	}
+
+	u, h, err := s.presigner.PresignHTTP(ctx, credentials,
+		httpReq, payloadHash, signingName, []string{signingRegion}, sdk.NowTime(),
+		func(o *SignerOptions) {
+			o.Logger = middleware.GetLogger(ctx)
+			o.LogSigning = s.logSigning
+		})
+	if err != nil {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("failed to sign http request, %w", err),
+		}
+	}
+
+	out.Result = &v4.PresignedHTTPRequest{
+		URL:          u,
+		Method:       httpReq.Method,
+		SignedHeader: h,
+	}
+
+	return out, metadata, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go
new file mode 100644
index 000000000..516d459d5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go
@@ -0,0 +1,86 @@
+package v4a
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	v4 
"github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// CredentialsAdapter adapts v4a.Credentials to smithy auth.Identity. +type CredentialsAdapter struct { + Credentials Credentials +} + +var _ auth.Identity = (*CredentialsAdapter)(nil) + +// Expiration returns the time of expiration for the credentials. +func (v *CredentialsAdapter) Expiration() time.Time { + return v.Credentials.Expires +} + +// CredentialsProviderAdapter adapts v4a.CredentialsProvider to +// auth.IdentityResolver. +type CredentialsProviderAdapter struct { + Provider CredentialsProvider +} + +var _ (auth.IdentityResolver) = (*CredentialsProviderAdapter)(nil) + +// GetIdentity retrieves v4a credentials using the underlying provider. +func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( + auth.Identity, error, +) { + creds, err := v.Provider.RetrievePrivateKey(ctx) + if err != nil { + return nil, fmt.Errorf("get credentials: %w", err) + } + + return &CredentialsAdapter{Credentials: creds}, nil +} + +// SignerAdapter adapts v4a.HTTPSigner to smithy http.Signer. +type SignerAdapter struct { + Signer HTTPSigner + Logger logging.Logger + LogSigning bool +} + +var _ (smithyhttp.Signer) = (*SignerAdapter)(nil) + +// SignRequest signs the request with the provided identity. +func (v *SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { + ca, ok := identity.(*CredentialsAdapter) + if !ok { + return fmt.Errorf("unexpected identity type: %T", identity) + } + + name, ok := smithyhttp.GetSigV4SigningName(&props) + if !ok { + return fmt.Errorf("sigv4a signing name is required") + } + + regions, ok := smithyhttp.GetSigV4ASigningRegions(&props) + if !ok { + return fmt.Errorf("sigv4a signing region is required") + } + + hash := v4.GetPayloadHash(ctx) + err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, regions, sdk.NowTime(), func(o *SignerOptions) { + o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) + + o.Logger = v.Logger + o.LogSigning = v.LogSigning + }) + if err != nil { + return fmt.Errorf("sign http: %w", err) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go new file mode 100644 index 000000000..f1f6ecc37 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go @@ -0,0 +1,520 @@ +package v4a + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash" + "math/big" + "net/http" + "net/textproto" + "net/url" + "sort" + "strconv" + "strings" + "time" + + signerCrypto "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto" + v4Internal "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/logging" +) + +const ( + // AmzRegionSetKey represents the region set header used for sigv4a + AmzRegionSetKey = "X-Amz-Region-Set" + amzAlgorithmKey = v4Internal.AmzAlgorithmKey + amzSecurityTokenKey = v4Internal.AmzSecurityTokenKey + amzDateKey = v4Internal.AmzDateKey + amzCredentialKey = v4Internal.AmzCredentialKey + amzSignedHeadersKey = v4Internal.AmzSignedHeadersKey + authorizationHeader = "Authorization" + 
+	signingAlgorithm = "AWS4-ECDSA-P256-SHA256"
+
+	timeFormat      = "20060102T150405Z"
+	shortTimeFormat = "20060102"
+
+	// EmptyStringSHA256 is a hex encoded SHA-256 hash of an empty string
+	EmptyStringSHA256 = v4Internal.EmptyStringSHA256
+
+	// Version of signing v4a
+	Version = "SigV4A"
+)
+
+var (
+	p256          elliptic.Curve
+	nMinusTwoP256 *big.Int
+
+	one = new(big.Int).SetInt64(1)
+)
+
+func init() {
+	// Ensure the elliptic curve parameters are initialized on package import rather than on first usage
+	p256 = elliptic.P256()
+
+	nMinusTwoP256 = new(big.Int).SetBytes(p256.Params().N.Bytes())
+	nMinusTwoP256 = nMinusTwoP256.Sub(nMinusTwoP256, new(big.Int).SetInt64(2))
+}
+
+// SignerOptions is the SigV4a signing options for constructing a Signer.
+type SignerOptions struct {
+	Logger     logging.Logger
+	LogSigning bool
+
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need additional
+	// escaping, use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+}
+
+// Signer is a SigV4a HTTP signing implementation
+type Signer struct {
+	options SignerOptions
+}
+
+// NewSigner constructs a SigV4a Signer.
+func NewSigner(optFns ...func(*SignerOptions)) *Signer {
+	options := SignerOptions{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &Signer{options: options}
+}
+
+// deriveKeyFromAccessKeyPair derives a NIST P-256 PrivateKey from the given
+// IAM AccessKey and SecretKey pair.
+//
+// Based on FIPS.186-4 Appendix B.4.2
+func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, error) {
+	params := p256.Params()
+	bitLen := params.BitSize // Testing random candidates does not require an additional 64 bits
+	counter := 0x01
+
+	buffer := make([]byte, 1+len(accessKey)) // 1 byte counter + len(accessKey)
+	kdfContext := bytes.NewBuffer(buffer)
+
+	inputKey := append([]byte("AWS4A"), []byte(secretKey)...)
+
+	d := new(big.Int)
+	for {
+		kdfContext.Reset()
+		kdfContext.WriteString(accessKey)
+		kdfContext.WriteByte(byte(counter))
+
+		key, err := signerCrypto.HMACKeyDerivation(sha256.New, bitLen, inputKey, []byte(signingAlgorithm), kdfContext.Bytes())
+		if err != nil {
+			return nil, err
+		}
+
+		// Check the key first before calling SetBytes to confirm it is in fact a valid candidate.
+// deriveKeyFromAccessKeyPair derives a NIST P-256 PrivateKey from the given
+// IAM AccessKey and SecretKey pair.
+//
+// Based on FIPS.186-4 Appendix B.4.2
+func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, error) {
+	params := p256.Params()
+	bitLen := params.BitSize // Testing random candidates does not require an additional 64 bits
+	counter := 0x01
+
+	buffer := make([]byte, 1+len(accessKey)) // 1 byte counter + len(accessKey)
+	kdfContext := bytes.NewBuffer(buffer)
+
+	inputKey := append([]byte("AWS4A"), []byte(secretKey)...)
+
+	d := new(big.Int)
+	for {
+		kdfContext.Reset()
+		kdfContext.WriteString(accessKey)
+		kdfContext.WriteByte(byte(counter))
+
+		key, err := signerCrypto.HMACKeyDerivation(sha256.New, bitLen, inputKey, []byte(signingAlgorithm), kdfContext.Bytes())
+		if err != nil {
+			return nil, err
+		}
+
+		// Check the key first, before calling SetBytes, to confirm it is in fact a valid candidate.
+		// This ensures the byte slice is the correct length (32-bytes) to compare in constant-time
+		cmp, err := signerCrypto.ConstantTimeByteCompare(key, nMinusTwoP256.Bytes())
+		if err != nil {
+			return nil, err
+		}
+		if cmp == -1 {
+			d.SetBytes(key)
+			break
+		}
+
+		counter++
+		if counter > 0xFF {
+			return nil, fmt.Errorf("exhausted single byte external counter")
+		}
+	}
+	d = d.Add(d, one)
+
+	priv := new(ecdsa.PrivateKey)
+	priv.PublicKey.Curve = p256
+	priv.D = d
+	priv.PublicKey.X, priv.PublicKey.Y = p256.ScalarBaseMult(d.Bytes())
+
+	return priv, nil
+}
+
+type httpSigner struct {
+	Request     *http.Request
+	ServiceName string
+	RegionSet   []string
+	Time        time.Time
+	Credentials Credentials
+	IsPreSign   bool
+
+	Logger logging.Logger
+	Debug  bool
+
+	// PayloadHash is the hex encoded SHA-256 hash of the request payload.
+	// If len(PayloadHash) == 0 the signer will attempt to send the request
+	// as an unsigned payload. Note: Unsigned payloads only work for a subset of services.
+	PayloadHash string
+
+	DisableHeaderHoisting  bool
+	DisableURIPathEscaping bool
+}
+
+// SignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and signs using SigV4a.
+// The passed in request will be modified in place.
+func (s *Signer) SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) error {
+	options := s.options
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	signer := &httpSigner{
+		Request:                r,
+		PayloadHash:            payloadHash,
+		ServiceName:            service,
+		RegionSet:              regionSet,
+		Credentials:            credentials,
+		Time:                   signingTime.UTC(),
+		DisableHeaderHoisting:  options.DisableHeaderHoisting,
+		DisableURIPathEscaping: options.DisableURIPathEscaping,
+	}
+
+	signedRequest, err := signer.Build()
+	if err != nil {
+		return err
+	}
+
+	logHTTPSigningInfo(ctx, options, signedRequest)
+
+	return nil
+}
+
+// PresignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and presigns using SigV4a.
+// Returns the presigned URL along with the headers that were signed with the request.
+//
+// PresignHTTP will not set the expires time of the presigned request
+// automatically. To specify the expire duration for a request add the
+// "X-Amz-Expires" query parameter on the request with the value as the
+// duration in seconds the presigned URL should be considered valid for. This
+// parameter is not used by all AWS services, and is most notably used by
+// Amazon S3 APIs.
+func (s *Signer) PresignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) (signedURI string, signedHeaders http.Header, err error) {
+	options := s.options
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	signer := &httpSigner{
+		Request:                r,
+		PayloadHash:            payloadHash,
+		ServiceName:            service,
+		RegionSet:              regionSet,
+		Credentials:            credentials,
+		Time:                   signingTime.UTC(),
+		IsPreSign:              true,
+		DisableHeaderHoisting:  options.DisableHeaderHoisting,
+		DisableURIPathEscaping: options.DisableURIPathEscaping,
+	}
+
+	signedRequest, err := signer.Build()
+	if err != nil {
+		return "", nil, err
+	}
+
+	logHTTPSigningInfo(ctx, options, signedRequest)
+
+	signedHeaders = make(http.Header)
+
+	// For the signed headers we canonicalize the header keys in the returned map.
+	// This avoids situations where the standard library can double headers like the Host header.
For example the standard + // library will set the Host header, even if it is present in lower-case form. + for k, v := range signedRequest.SignedHeaders { + key := textproto.CanonicalMIMEHeaderKey(k) + signedHeaders[key] = append(signedHeaders[key], v...) + } + + return signedRequest.Request.URL.String(), signedHeaders, nil +} + +func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) { + amzDate := s.Time.Format(timeFormat) + + if s.IsPreSign { + query.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ",")) + query.Set(amzDateKey, amzDate) + query.Set(amzAlgorithmKey, signingAlgorithm) + if len(s.Credentials.SessionToken) > 0 { + query.Set(amzSecurityTokenKey, s.Credentials.SessionToken) + } + return + } + + headers.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ",")) + headers.Set(amzDateKey, amzDate) + if len(s.Credentials.SessionToken) > 0 { + headers.Set(amzSecurityTokenKey, s.Credentials.SessionToken) + } +} + +func (s *httpSigner) Build() (signedRequest, error) { + req := s.Request + + query := req.URL.Query() + headers := req.Header + + s.setRequiredSigningFields(headers, query) + + // Sort Each Query Key's Values + for key := range query { + sort.Strings(query[key]) + } + + v4Internal.SanitizeHostForHeader(req) + + credentialScope := s.buildCredentialScope() + credentialStr := s.Credentials.Context + "/" + credentialScope + if s.IsPreSign { + query.Set(amzCredentialKey, credentialStr) + } + + unsignedHeaders := headers + if s.IsPreSign && !s.DisableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, unsignedHeaders) + for k := range urlValues { + query[k] = urlValues[k] + } + } + + host := req.URL.Host + if len(req.Host) > 0 { + host = req.Host + } + + signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) + + if s.IsPreSign { + query.Set(amzSignedHeadersKey, signedHeadersStr) + } + + rawQuery := strings.Replace(query.Encode(), "+", "%20", -1) + + canonicalURI := v4Internal.GetURIPath(req.URL) + if !s.DisableURIPathEscaping { + canonicalURI = httpbinding.EscapePath(canonicalURI, false) + } + + canonicalString := s.buildCanonicalString( + req.Method, + canonicalURI, + rawQuery, + signedHeadersStr, + canonicalHeaderStr, + ) + + strToSign := s.buildStringToSign(credentialScope, canonicalString) + signingSignature, err := s.buildSignature(strToSign) + if err != nil { + return signedRequest{}, err + } + + if s.IsPreSign { + rawQuery += "&X-Amz-Signature=" + signingSignature + } else { + headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature)) + } + + req.URL.RawQuery = rawQuery + + return signedRequest{ + Request: req, + SignedHeaders: signedHeaders, + CanonicalString: canonicalString, + StringToSign: strToSign, + PreSigned: s.IsPreSign, + }, nil +} + +func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string { + const credential = "Credential=" + const signedHeaders = "SignedHeaders=" + const signature = "Signature=" + const commaSpace = ", " + + var parts strings.Builder + parts.Grow(len(signingAlgorithm) + 1 + + len(credential) + len(credentialStr) + len(commaSpace) + + len(signedHeaders) + len(signedHeadersStr) + len(commaSpace) + + len(signature) + len(signingSignature), + ) + parts.WriteString(signingAlgorithm) + parts.WriteRune(' ') + 
parts.WriteString(credential) + parts.WriteString(credentialStr) + parts.WriteString(commaSpace) + parts.WriteString(signedHeaders) + parts.WriteString(signedHeadersStr) + parts.WriteString(commaSpace) + parts.WriteString(signature) + parts.WriteString(signingSignature) + return parts.String() +} + +func (s *httpSigner) buildCredentialScope() string { + return strings.Join([]string{ + s.Time.Format(shortTimeFormat), + s.ServiceName, + "aws4_request", + }, "/") + +} + +func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} + +func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) { + signed = make(http.Header) + + var headers []string + const hostHeader = "host" + headers = append(headers, hostHeader) + signed[hostHeader] = append(signed[hostHeader], host) + + if length > 0 { + const contentLengthHeader = "content-length" + headers = append(headers, contentLengthHeader) + signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10)) + } + + for k, v := range header { + if !rule.IsValid(k) { + continue // ignored header + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := signed[lowerCaseKey]; ok { + // include additional values + signed[lowerCaseKey] = append(signed[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + signed[lowerCaseKey] = v + } + sort.Strings(headers) + + signedHeaders = strings.Join(headers, ";") + + var canonicalHeaders strings.Builder + n := len(headers) + const colon = ':' + for i := 0; i < n; i++ { + if headers[i] == hostHeader { + canonicalHeaders.WriteString(hostHeader) + canonicalHeaders.WriteRune(colon) + canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host)) + } else { + canonicalHeaders.WriteString(headers[i]) + canonicalHeaders.WriteRune(colon) + // Trim out leading, trailing, and dedup inner spaces from signed header values. 
+ values := signed[headers[i]] + for j, v := range values { + cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v)) + canonicalHeaders.WriteString(cleanedValue) + if j < len(values)-1 { + canonicalHeaders.WriteRune(',') + } + } + } + canonicalHeaders.WriteRune('\n') + } + canonicalHeadersStr = canonicalHeaders.String() + + return signed, signedHeaders, canonicalHeadersStr +} + +func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string { + return strings.Join([]string{ + method, + uri, + query, + canonicalHeaders, + signedHeaders, + s.PayloadHash, + }, "\n") +} + +func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string { + return strings.Join([]string{ + signingAlgorithm, + s.Time.Format(timeFormat), + credentialScope, + hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))), + }, "\n") +} + +func makeHash(hash hash.Hash, b []byte) []byte { + hash.Reset() + hash.Write(b) + return hash.Sum(nil) +} + +func (s *httpSigner) buildSignature(strToSign string) (string, error) { + sig, err := s.Credentials.PrivateKey.Sign(rand.Reader, makeHash(sha256.New(), []byte(strToSign)), crypto.SHA256) + if err != nil { + return "", err + } + return hex.EncodeToString(sig), nil +} + +const logSignInfoMsg = `Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func logHTTPSigningInfo(ctx context.Context, options SignerOptions, r signedRequest) { + if !options.LogSigning { + return + } + signedURLMsg := "" + if r.PreSigned { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, r.Request.URL.String()) + } + logger := logging.WithContext(ctx, options.Logger) + logger.Logf(logging.Debug, logSignInfoMsg, r.CanonicalString, r.StringToSign, signedURLMsg) +} + +type signedRequest struct { + Request *http.Request + SignedHeaders http.Header + CanonicalString string + StringToSign string + PreSigned bool +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md new file mode 100644 index 000000000..2246bd62e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md @@ -0,0 +1,243 @@ +# v1.3.7 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.5 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.4 (2024-03-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2024-03-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.10 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.9 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.8 (2023-12-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.7 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.6 (2023-11-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.5 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.4 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.38 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.37 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.36 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.35 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.34 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.33 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.32 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.31 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.30 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.29 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.28 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.27 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.26 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.25 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.24 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.23 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.22 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.21 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.20 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.19 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.18 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.17 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.16 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# 
v1.1.15 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.14 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.13 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.12 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.11 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.10 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.9 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.8 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.7 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.6 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.5 (2022-04-27)
+
+* **Bug Fix**: Fixes a bug that could cause the SigV4 payload hash to be incorrectly encoded, leading to signing errors.
+
+# v1.1.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2022-03-08)
+
+* **Feature**: Updates the SDK's checksum validation logic to require opt-in to output response payload validation. The SDK was always performing output response payload checksum validation, not respecting the output validation model option. Fixes [#1606](https://github.com/aws/aws-sdk-go-v2/issues/1606)
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.0 (2022-02-24)
+
+* **Release**: New module for computing checksums
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go new file mode 100644 index 000000000..a17041c35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go @@ -0,0 +1,323 @@ +package checksum + +import ( + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "hash/crc32" + "io" + "strings" + "sync" +) + +// Algorithm represents the checksum algorithms supported +type Algorithm string + +// Enumeration values for supported checksum Algorithms. +const ( + // AlgorithmCRC32C represents CRC32C hash algorithm + AlgorithmCRC32C Algorithm = "CRC32C" + + // AlgorithmCRC32 represents CRC32 hash algorithm + AlgorithmCRC32 Algorithm = "CRC32" + + // AlgorithmSHA1 represents SHA1 hash algorithm + AlgorithmSHA1 Algorithm = "SHA1" + + // AlgorithmSHA256 represents SHA256 hash algorithm + AlgorithmSHA256 Algorithm = "SHA256" +) + +var supportedAlgorithms = []Algorithm{ + AlgorithmCRC32C, + AlgorithmCRC32, + AlgorithmSHA1, + AlgorithmSHA256, +} + +func (a Algorithm) String() string { return string(a) } + +// ParseAlgorithm attempts to parse the provided value into a checksum +// algorithm, matching without case. Returns the algorithm matched, or an error +// if the algorithm wasn't matched. +func ParseAlgorithm(v string) (Algorithm, error) { + for _, a := range supportedAlgorithms { + if strings.EqualFold(string(a), v) { + return a, nil + } + } + return "", fmt.Errorf("unknown checksum algorithm, %v", v) +} + +// FilterSupportedAlgorithms filters the set of algorithms, returning a slice +// of algorithms that are supported. +func FilterSupportedAlgorithms(vs []string) []Algorithm { + found := map[Algorithm]struct{}{} + + supported := make([]Algorithm, 0, len(supportedAlgorithms)) + for _, v := range vs { + for _, a := range supportedAlgorithms { + // Only consider algorithms that are supported + if !strings.EqualFold(v, string(a)) { + continue + } + // Ignore duplicate algorithms in list. + if _, ok := found[a]; ok { + continue + } + + supported = append(supported, a) + found[a] = struct{}{} + } + } + return supported +} + +// NewAlgorithmHash returns a hash.Hash for the checksum algorithm. Error is +// returned if the algorithm is unknown. +func NewAlgorithmHash(v Algorithm) (hash.Hash, error) { + switch v { + case AlgorithmSHA1: + return sha1.New(), nil + case AlgorithmSHA256: + return sha256.New(), nil + case AlgorithmCRC32: + return crc32.NewIEEE(), nil + case AlgorithmCRC32C: + return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil + default: + return nil, fmt.Errorf("unknown checksum algorithm, %v", v) + } +} + +// AlgorithmChecksumLength returns the length of the algorithm's checksum in +// bytes. If the algorithm is not known, an error is returned. +func AlgorithmChecksumLength(v Algorithm) (int, error) { + switch v { + case AlgorithmSHA1: + return sha1.Size, nil + case AlgorithmSHA256: + return sha256.Size, nil + case AlgorithmCRC32: + return crc32.Size, nil + case AlgorithmCRC32C: + return crc32.Size, nil + default: + return 0, fmt.Errorf("unknown checksum algorithm, %v", v) + } +} + +const awsChecksumHeaderPrefix = "x-amz-checksum-" + +// AlgorithmHTTPHeader returns the HTTP header for the algorithm's hash. 
+func AlgorithmHTTPHeader(v Algorithm) string {
+	return awsChecksumHeaderPrefix + strings.ToLower(string(v))
+}
+
+// base64EncodeHashSum computes the base64 encoded checksum of a given running
+// hash. The running hash must already have content written to it. Returns the
+// byte slice of the encoded checksum.
+func base64EncodeHashSum(h hash.Hash) []byte {
+	sum := h.Sum(nil)
+	sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+	base64.StdEncoding.Encode(sum64, sum)
+	return sum64
+}
+
+// hexEncodeHashSum computes the hex encoded checksum of a given running hash.
+// The running hash must already have content written to it. Returns the byte
+// slice of the encoded checksum.
+func hexEncodeHashSum(h hash.Hash) []byte {
+	sum := h.Sum(nil)
+	sumHex := make([]byte, hex.EncodedLen(len(sum)))
+	hex.Encode(sumHex, sum)
+	return sumHex
+}
+
+// computeMD5Checksum computes the base64 MD5 checksum of an io.Reader's
+// contents. Returns the byte slice of the MD5 checksum and an error.
+func computeMD5Checksum(r io.Reader) ([]byte, error) {
+	h := md5.New()
+
+	// Copy errors may be assumed to be from the body.
+	if _, err := io.Copy(h, r); err != nil {
+		return nil, fmt.Errorf("failed to compute MD5 hash of reader, %w", err)
+	}
+
+	// Encode the MD5 checksum in base64.
+	return base64EncodeHashSum(h), nil
+}
+
+// computeChecksumReader provides a reader wrapping an underlying io.Reader to
+// compute the checksum of the stream's bytes.
+type computeChecksumReader struct {
+	stream            io.Reader
+	algorithm         Algorithm
+	hasher            hash.Hash
+	base64ChecksumLen int
+
+	mux            sync.RWMutex
+	lockedChecksum string
+	lockedErr      error
+}
+
+// newComputeChecksumReader returns a computeChecksumReader for the stream and
+// algorithm specified. Returns an error if unable to create the reader, or if
+// the algorithm is unknown.
+func newComputeChecksumReader(stream io.Reader, algorithm Algorithm) (*computeChecksumReader, error) {
+	hasher, err := NewAlgorithmHash(algorithm)
+	if err != nil {
+		return nil, err
+	}
+
+	checksumLength, err := AlgorithmChecksumLength(algorithm)
+	if err != nil {
+		return nil, err
+	}
+
+	return &computeChecksumReader{
+		stream:            io.TeeReader(stream, hasher),
+		algorithm:         algorithm,
+		hasher:            hasher,
+		base64ChecksumLen: base64.StdEncoding.EncodedLen(checksumLength),
+	}, nil
+}
+
+// Read wraps the underlying reader. When the underlying reader returns EOF,
+// the checksum of the reader will be computed, and can be retrieved with
+// Base64Checksum.
+func (r *computeChecksumReader) Read(p []byte) (int, error) {
+	n, err := r.stream.Read(p)
+	if err == nil {
+		return n, nil
+	} else if err != io.EOF {
+		r.mux.Lock()
+		defer r.mux.Unlock()
+
+		r.lockedErr = err
+		return n, err
+	}
+
+	b := base64EncodeHashSum(r.hasher)
+
+	r.mux.Lock()
+	defer r.mux.Unlock()
+
+	r.lockedChecksum = string(b)
+
+	return n, err
+}
+
+func (r *computeChecksumReader) Algorithm() Algorithm {
+	return r.algorithm
+}
+
+// Base64ChecksumLength returns the base64 encoded length of the checksum for
+// the algorithm.
+func (r *computeChecksumReader) Base64ChecksumLength() int {
+	return r.base64ChecksumLen
+}
+
+// Base64Checksum returns the base64 checksum for the algorithm, or an error if
+// the underlying reader returned a non-EOF error.
+//
+// Safe to be called concurrently, but will return an error until after the
+// underlying reader returns EOF.
+func (r *computeChecksumReader) Base64Checksum() (string, error) { + r.mux.RLock() + defer r.mux.RUnlock() + + if r.lockedErr != nil { + return "", r.lockedErr + } + + if r.lockedChecksum == "" { + return "", fmt.Errorf( + "checksum not available yet, called before reader returns EOF", + ) + } + + return r.lockedChecksum, nil +} + +// validateChecksumReader implements io.ReadCloser interface. The wrapper +// performs checksum validation when the underlying reader has been fully read. +type validateChecksumReader struct { + originalBody io.ReadCloser + body io.Reader + hasher hash.Hash + algorithm Algorithm + expectChecksum string +} + +// newValidateChecksumReader returns a configured io.ReadCloser that performs +// checksum validation when the underlying reader has been fully read. +func newValidateChecksumReader( + body io.ReadCloser, + algorithm Algorithm, + expectChecksum string, +) (*validateChecksumReader, error) { + hasher, err := NewAlgorithmHash(algorithm) + if err != nil { + return nil, err + } + + return &validateChecksumReader{ + originalBody: body, + body: io.TeeReader(body, hasher), + hasher: hasher, + algorithm: algorithm, + expectChecksum: expectChecksum, + }, nil +} + +// Read attempts to read from the underlying stream while also updating the +// running hash. If the underlying stream returns with an EOF error, the +// checksum of the stream will be collected, and compared against the expected +// checksum. If the checksums do not match, an error will be returned. +// +// If a non-EOF error occurs when reading the underlying stream, that error +// will be returned and the checksum for the stream will be discarded. +func (c *validateChecksumReader) Read(p []byte) (n int, err error) { + n, err = c.body.Read(p) + if err == io.EOF { + if checksumErr := c.validateChecksum(); checksumErr != nil { + return n, checksumErr + } + } + + return n, err +} + +// Close closes the underlying reader, returning any error that occurred in the +// underlying reader. +func (c *validateChecksumReader) Close() (err error) { + return c.originalBody.Close() +} + +func (c *validateChecksumReader) validateChecksum() error { + // Compute base64 encoded checksum hash of the payload's read bytes. 
+	v := base64EncodeHashSum(c.hasher)
+	if e, a := c.expectChecksum, string(v); !strings.EqualFold(e, a) {
+		return validationError{
+			Algorithm: c.algorithm, Expect: e, Actual: a,
+		}
+	}
+
+	return nil
+}
+
+type validationError struct {
+	Algorithm Algorithm
+	Expect    string
+	Actual    string
+}
+
+func (v validationError) Error() string {
+	return fmt.Sprintf("checksum did not match: algorithm %v, expect %v, actual %v",
+		v.Algorithm, v.Expect, v.Actual)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go
new file mode 100644
index 000000000..3bd320c43
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go
@@ -0,0 +1,389 @@
+package checksum
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+const (
+	crlf = "\r\n"
+
+	// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
+	defaultChunkLength = 1024 * 64
+
+	awsTrailerHeaderName           = "x-amz-trailer"
+	decodedContentLengthHeaderName = "x-amz-decoded-content-length"
+
+	contentEncodingHeaderName            = "content-encoding"
+	awsChunkedContentEncodingHeaderValue = "aws-chunked"
+
+	trailerKeyValueSeparator = ":"
+)
+
+var (
+	crlfBytes       = []byte(crlf)
+	finalChunkBytes = []byte("0" + crlf)
+)
+
+type awsChunkedEncodingOptions struct {
+	// The total size of the stream. For unsigned encoding this implies that
+	// there will only be a single chunk containing the underlying payload,
+	// unless ChunkLength is also specified.
+	StreamLength int64
+
+	// Set of trailer key:value pairs that will be appended to the end of the
+	// payload after the end chunk has been written.
+	Trailers map[string]awsChunkedTrailerValue
+
+	// The maximum size of each chunk to be sent. The default value of -1
+	// signals that an optimal chunk length will be used automatically.
+	// ChunkLength must be at least 8KB.
+	//
+	// If ChunkLength and StreamLength are both specified, the stream will be
+	// broken up into ChunkLength chunks. The encoded length of the aws-chunked
+	// encoding can still be determined as long as all trailers, if any, have a
+	// fixed length.
+	ChunkLength int
+}
+
+type awsChunkedTrailerValue struct {
+	// Function to retrieve the value of the trailer. Will only be called after
+	// the underlying stream returns an EOF error.
+	Get func() (string, error)
+
+	// If the length of the value can be pre-determined and is constant,
+	// specify the length. A value of -1 means the length is unknown, or
+	// cannot be pre-determined.
+	Length int
+}
+
+// awsChunkedEncoding provides a reader that wraps the payload such that the
+// payload is read as a single aws-chunked payload. This reader can only be
+// used if the content length of the payload is known. Content-Length is used
+// as the size of the single payload chunk. The final chunk and trailing
+// checksum are appended at the end.
+//
+// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
+//
+// Here is the aws-chunked payload stream as read from the awsChunkedEncoding
+// if the original request stream is "Hello world", and the checksum hash used
+// is SHA256:
+//
+//	b\r\n
+//	Hello world\r\n
+//	0\r\n
+//	x-amz-checksum-sha256:ZOyIygCyaOW6GjVnihtTFtIS9PNmskdyMlNKiuyjfzw=\r\n
+//	\r\n
+type awsChunkedEncoding struct {
+	options awsChunkedEncodingOptions
+
+	encodedStream        io.Reader
+	trailerEncodedLength int
+}
+
+// newUnsignedAWSChunkedEncoding returns a new awsChunkedEncoding configured
+// for unsigned aws-chunked content encoding. Any additional trailers that need
+// to be appended after the end chunk must be included via Trailer callbacks.
+func newUnsignedAWSChunkedEncoding(
+	stream io.Reader,
+	optFns ...func(*awsChunkedEncodingOptions),
+) *awsChunkedEncoding {
+	options := awsChunkedEncodingOptions{
+		Trailers:     map[string]awsChunkedTrailerValue{},
+		StreamLength: -1,
+		ChunkLength:  -1,
+	}
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	var chunkReader io.Reader
+	if options.ChunkLength != -1 || options.StreamLength == -1 {
+		if options.ChunkLength == -1 {
+			options.ChunkLength = defaultChunkLength
+		}
+		chunkReader = newBufferedAWSChunkReader(stream, options.ChunkLength)
+	} else {
+		chunkReader = newUnsignedChunkReader(stream, options.StreamLength)
+	}
+
+	trailerReader := newAWSChunkedTrailerReader(options.Trailers)
+
+	return &awsChunkedEncoding{
+		options: options,
+		encodedStream: io.MultiReader(chunkReader,
+			trailerReader,
+			bytes.NewBuffer(crlfBytes),
+		),
+		trailerEncodedLength: trailerReader.EncodedLength(),
+	}
+}
+
+// EncodedLength returns the final length of the aws-chunked content encoded
+// stream if it can be determined without reading the underlying stream or lazy
+// header values, otherwise -1 is returned.
+func (e *awsChunkedEncoding) EncodedLength() int64 {
+	var length int64
+	if e.options.StreamLength == -1 || e.trailerEncodedLength == -1 {
+		return -1
+	}
+
+	if e.options.StreamLength != 0 {
+		// If the stream length is known, and there is no chunk length specified,
+		// only a single chunk will be used. Otherwise the stream length needs to
+		// include the multiple chunk padding content.
+		if e.options.ChunkLength == -1 {
+			length += getUnsignedChunkBytesLength(e.options.StreamLength)
+
+		} else {
+			// Compute chunk header and payload length
+			numChunks := e.options.StreamLength / int64(e.options.ChunkLength)
+			length += numChunks * getUnsignedChunkBytesLength(int64(e.options.ChunkLength))
+			if remainder := e.options.StreamLength % int64(e.options.ChunkLength); remainder != 0 {
+				length += getUnsignedChunkBytesLength(remainder)
+			}
+		}
+	}
+
+	// End chunk
+	length += int64(len(finalChunkBytes))
+
+	// Trailers
+	length += int64(e.trailerEncodedLength)
+
+	// Encoding terminator
+	length += int64(len(crlf))
+
+	return length
+}
+
+func getUnsignedChunkBytesLength(payloadLength int64) int64 {
+	payloadLengthStr := strconv.FormatInt(payloadLength, 16)
+	return int64(len(payloadLengthStr)) + int64(len(crlf)) + payloadLength + int64(len(crlf))
+}
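+// --- Editorial example (a sketch, not part of the upstream module) ---
+// Wrapping a known-length payload produces the framing shown in the comment
+// above: one length-prefixed chunk, the zero-length end chunk, any trailers,
+// and a final CRLF. Since the constructor is unexported, this only compiles
+// within package checksum:
+//
+//	payload := strings.NewReader("Hello world")
+//	enc := newUnsignedAWSChunkedEncoding(payload, func(o *awsChunkedEncodingOptions) {
+//		o.StreamLength = 11 // len("Hello world")
+//	})
+//	encoded, _ := io.ReadAll(enc) // "b\r\nHello world\r\n0\r\n\r\n"
+//	_ = encoded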
+// HTTPHeaders returns the set of headers that must be included in the request
+// for aws-chunked to work. This includes the content-encoding: aws-chunked
+// header.
+//
+// If there are multiple layered content encodings, the aws-chunked encoding
+// must be appended after the previous layers of the stream's encoding. The
+// best way to do this is to append all header values returned to the HTTP
+// request's set of headers.
+func (e *awsChunkedEncoding) HTTPHeaders() map[string][]string {
+	headers := map[string][]string{
+		contentEncodingHeaderName: {
+			awsChunkedContentEncodingHeaderValue,
+		},
+	}
+
+	if len(e.options.Trailers) != 0 {
+		trailers := make([]string, 0, len(e.options.Trailers))
+		for name := range e.options.Trailers {
+			trailers = append(trailers, strings.ToLower(name))
+		}
+		headers[awsTrailerHeaderName] = trailers
+	}
+
+	return headers
+}
+
+func (e *awsChunkedEncoding) Read(b []byte) (n int, err error) {
+	return e.encodedStream.Read(b)
+}
+
+// awsChunkedTrailerReader provides a lazy reader for reading of aws-chunked
+// content encoded trailers. The trailer values will not be retrieved until the
+// reader is read from.
+type awsChunkedTrailerReader struct {
+	reader               *bytes.Buffer
+	trailers             map[string]awsChunkedTrailerValue
+	trailerEncodedLength int
+}
+
+// newAWSChunkedTrailerReader returns an awsChunkedTrailerReader initialized
+// for lazily reading aws-chunked content encoded trailers.
+func newAWSChunkedTrailerReader(trailers map[string]awsChunkedTrailerValue) *awsChunkedTrailerReader {
+	return &awsChunkedTrailerReader{
+		trailers:             trailers,
+		trailerEncodedLength: trailerEncodedLength(trailers),
+	}
+}
+
+func trailerEncodedLength(trailers map[string]awsChunkedTrailerValue) (length int) {
+	for name, trailer := range trailers {
+		length += len(name) + len(trailerKeyValueSeparator)
+		l := trailer.Length
+		if l == -1 {
+			return -1
+		}
+		length += l + len(crlf)
+	}
+
+	return length
+}
+
+// EncodedLength returns the length of the encoded trailers if the length could
+// be determined without retrieving the header values. Returns -1 if the length
+// is unknown.
+func (r *awsChunkedTrailerReader) EncodedLength() (length int) {
+	return r.trailerEncodedLength
+}
+
+// Read populates the passed in byte slice with bytes from the encoded
+// trailers. Will lazily read header values the first time Read is called.
+func (r *awsChunkedTrailerReader) Read(p []byte) (int, error) {
+	if r.trailerEncodedLength == 0 {
+		return 0, io.EOF
+	}
+
+	if r.reader == nil {
+		trailerLen := r.trailerEncodedLength
+		if r.trailerEncodedLength == -1 {
+			trailerLen = 0
+		}
+		r.reader = bytes.NewBuffer(make([]byte, 0, trailerLen))
+		for name, trailer := range r.trailers {
+			r.reader.WriteString(name)
+			r.reader.WriteString(trailerKeyValueSeparator)
+			v, err := trailer.Get()
+			if err != nil {
+				return 0, fmt.Errorf("failed to get trailer value, %w", err)
+			}
+			r.reader.WriteString(v)
+			r.reader.WriteString(crlf)
+		}
+	}
+
+	return r.reader.Read(p)
+}
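+// --- Editorial example (a sketch, not part of the upstream module) ---
+// A trailing checksum is declared as an awsChunkedTrailerValue whose Get
+// callback is only invoked after the wrapped stream hits EOF; pairing it with
+// a computeChecksumReader from algorithms.go yields the lazily computed
+// trailer value. In-package sketch:
+//
+//	body, _ := newComputeChecksumReader(strings.NewReader("Hello world"), AlgorithmCRC32)
+//	enc := newUnsignedAWSChunkedEncoding(body, func(o *awsChunkedEncodingOptions) {
+//		o.StreamLength = 11
+//		o.Trailers[AlgorithmHTTPHeader(body.Algorithm())] = awsChunkedTrailerValue{
+//			Get:    body.Base64Checksum,
+//			Length: body.Base64ChecksumLength(),
+//		}
+//	})
+//	_ = enc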
+// newUnsignedChunkReader returns an io.Reader encoding the underlying reader
+// as unsigned aws-chunked chunks. The returned reader will also include the
+// end chunk, but not the aws-chunked final `crlf` segment so trailers can be
+// added.
+//
+// If the payload size is -1 (unknown length), the content will be buffered in
+// defaultChunkLength chunks before being wrapped in aws-chunked chunk
+// encoding.
+func newUnsignedChunkReader(reader io.Reader, payloadSize int64) io.Reader {
+	if payloadSize == -1 {
+		return newBufferedAWSChunkReader(reader, defaultChunkLength)
+	}
+
+	var endChunk bytes.Buffer
+	if payloadSize == 0 {
+		endChunk.Write(finalChunkBytes)
+		return &endChunk
+	}
+
+	endChunk.WriteString(crlf)
+	endChunk.Write(finalChunkBytes)
+
+	var header bytes.Buffer
+	header.WriteString(strconv.FormatInt(payloadSize, 16))
+	header.WriteString(crlf)
+	return io.MultiReader(
+		&header,
+		reader,
+		&endChunk,
+	)
+}
+
+// Provides a buffered aws-chunked chunk encoder of an underlying io.Reader.
+// Will include the end chunk, but not the aws-chunked final `crlf` segment so
+// trailers can be added.
+//
+// Note: does not implement support for chunk extensions, e.g. chunk signing.
+type bufferedAWSChunkReader struct {
+	reader       io.Reader
+	chunkSize    int
+	chunkSizeStr string
+
+	headerBuffer *bytes.Buffer
+	chunkBuffer  *bytes.Buffer
+
+	multiReader    io.Reader
+	multiReaderLen int
+	endChunkDone   bool
+}
+
+// newBufferedAWSChunkReader returns a bufferedAWSChunkReader for reading
+// aws-chunked encoded chunks.
+func newBufferedAWSChunkReader(reader io.Reader, chunkSize int) *bufferedAWSChunkReader {
+	return &bufferedAWSChunkReader{
+		reader:       reader,
+		chunkSize:    chunkSize,
+		chunkSizeStr: strconv.FormatInt(int64(chunkSize), 16),
+
+		headerBuffer: bytes.NewBuffer(make([]byte, 0, 64)),
+		chunkBuffer:  bytes.NewBuffer(make([]byte, 0, chunkSize+len(crlf))),
+	}
+}
+
+// Read attempts to read from the underlying io.Reader, writing aws-chunked
+// chunk encoded bytes to p. When the underlying io.Reader has been completely
+// read, the end chunk will be available. Once the end chunk is read, the
+// reader will return EOF.
+func (r *bufferedAWSChunkReader) Read(p []byte) (n int, err error) {
+	if r.multiReaderLen == 0 && r.endChunkDone {
+		return 0, io.EOF
+	}
+	if r.multiReader == nil || r.multiReaderLen == 0 {
+		r.multiReader, r.multiReaderLen, err = r.newMultiReader()
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	n, err = r.multiReader.Read(p)
+	r.multiReaderLen -= n
+
+	if err == io.EOF && !r.endChunkDone {
+		// Edge case handling when the multi-reader has been completely read,
+		// and returned an EOF, make sure that EOF only gets returned if the
+		// end chunk was included in the multi-reader. Otherwise, the next call
+		// to read will initialize the next chunk's multi-reader.
+		err = nil
+	}
+	return n, err
+}
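+// --- Editorial example (a sketch, not part of the upstream module) ---
+// When the stream length is unknown (or a ChunkLength is forced), the
+// buffered reader emits one length-prefixed chunk per buffer fill. For a
+// 5-byte chunk size over "Hello world" the framing would be
+// "5\r\nHello\r\n5\r\n worl\r\n1\r\nd\r\n0\r\n" (no trailing CRLF; trailers
+// and the terminator are appended by the callers above). In-package sketch:
+//
+//	r := newBufferedAWSChunkReader(strings.NewReader("Hello world"), 5)
+//	framed, _ := io.ReadAll(r)
+//	_ = framed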
+// newMultiReader returns a new io.Reader for wrapping the next chunk. Will
+// return an error if the underlying reader cannot be read from. Will never
+// return io.EOF.
+func (r *bufferedAWSChunkReader) newMultiReader() (io.Reader, int, error) {
+	// io.Copy eats the io.EOF returned by io.LimitReader. Any error that
+	// occurs here is due to an actual read error.
+	n, err := io.Copy(r.chunkBuffer, io.LimitReader(r.reader, int64(r.chunkSize)))
+	if err != nil {
+		return nil, 0, err
+	}
+	if n == 0 {
+		// Early exit writing out only the end chunk. This does not include
+		// aws-chunked's final `crlf` so that trailers can still be added by
+		// an upstream reader.
+		r.headerBuffer.Reset()
+		r.headerBuffer.WriteString("0")
+		r.headerBuffer.WriteString(crlf)
+		r.endChunkDone = true
+
+		return r.headerBuffer, r.headerBuffer.Len(), nil
+	}
+	r.chunkBuffer.WriteString(crlf)
+
+	chunkSizeStr := r.chunkSizeStr
+	if int(n) != r.chunkSize {
+		chunkSizeStr = strconv.FormatInt(n, 16)
+	}
+
+	r.headerBuffer.Reset()
+	r.headerBuffer.WriteString(chunkSizeStr)
+	r.headerBuffer.WriteString(crlf)
+
+	return io.MultiReader(
+		r.headerBuffer,
+		r.chunkBuffer,
+	), r.headerBuffer.Len() + r.chunkBuffer.Len(), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
new file mode 100644
index 000000000..6785174da
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package checksum
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.3.7"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go
new file mode 100644
index 000000000..1b727acbe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go
@@ -0,0 +1,180 @@
+package checksum
+
+import (
+	"github.com/aws/smithy-go/middleware"
+)
+
+// InputMiddlewareOptions provides the options for the request
+// checksum middleware setup.
+type InputMiddlewareOptions struct {
+	// GetAlgorithm is a function to get the checksum algorithm of the
+	// input payload from the input parameters.
+	//
+	// Given the input parameter value, the function must return the algorithm
+	// and true, or false if no algorithm is specified.
+	GetAlgorithm func(interface{}) (string, bool)
+
+	// Forces the middleware to compute the input payload's checksum. The
+	// request will fail if the algorithm is not specified or the middleware
+	// is unable to compute the checksum.
+	RequireChecksum bool
+
+	// Enables support for wrapping the serialized input payload with a
+	// content-encoding: aws-chunked wrapper, and including a trailer for the
+	// algorithm's checksum value.
+	//
+	// The checksum will not be computed, nor added as a trailing checksum, if
+	// the Algorithm's header is already set on the request.
+	EnableTrailingChecksum bool
+
+	// Enables support for computing the SHA256 checksum of input payloads
+	// along with the algorithm specified checksum. Prevents downstream
+	// middleware handlers (computePayloadSHA256) from re-reading the payload.
+	//
+	// The SHA256 payload checksum will only be computed for requests that are
+	// not TLS, or do not enable trailing checksums.
+	//
+	// The SHA256 payload hash will not be computed, if the Algorithm's header
+	// is already set on the request.
+	EnableComputeSHA256PayloadHash bool
+
+	// Enables support for setting the aws-chunked decoded content length
+	// header for the decoded length of the underlying stream. Will only be set
+	// when used with trailing checksums, and aws-chunked content-encoding.
+	EnableDecodedContentLengthHeader bool
+}
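+// --- Editorial example (a sketch, not part of the upstream module) ---
+// Service clients are expected to wire these options up from their generated
+// operation middleware. The rough shape of that wiring, with a hypothetical
+// input type carrying a ChecksumAlgorithm member, might look like this (see
+// AddInputMiddleware below):
+//
+//	err := AddInputMiddleware(stack, InputMiddlewareOptions{
+//		GetAlgorithm: func(in interface{}) (string, bool) {
+//			input, ok := in.(*putObjectInput) // hypothetical input type
+//			if !ok || input.ChecksumAlgorithm == "" {
+//				return "", false
+//			}
+//			return string(input.ChecksumAlgorithm), true
+//		},
+//		RequireChecksum:                  false,
+//		EnableTrailingChecksum:           true,
+//		EnableComputeSHA256PayloadHash:   true,
+//		EnableDecodedContentLengthHeader: true,
+//	})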
+// AddInputMiddleware adds the middleware for computing checksums of request
+// payloads, and validating checksums of response payloads.
+func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) (err error) {
+	// TODO ensure this works correctly with presigned URLs
+
+	// Middleware stack:
+	// * (OK)(Initialize) --none--
+	// * (OK)(Serialize) EndpointResolver
+	// * (OK)(Build) ComputeContentLength
+	// * (AD)(Build) Header ComputeInputPayloadChecksum
+	//    * SIGNED Payload - If HTTP && not support trailing checksum
+	//    * UNSIGNED Payload - If HTTPS && not support trailing checksum
+	// * (RM)(Build) ContentChecksum - OK to remove
+	// * (OK)(Build) ComputePayloadHash
+	//    * v4.dynamicPayloadSigningMiddleware
+	//    * v4.computePayloadSHA256
+	//    * v4.unsignedPayload
+	//   (OK)(Build) Set computedPayloadHash header
+	// * (OK)(Finalize) Retry
+	// * (AD)(Finalize) Trailer ComputeInputPayloadChecksum,
+	//    * Requires HTTPS && support trailing checksum
+	//    * UNSIGNED Payload
+	//    * Finalize run if HTTPS && support trailing checksum
+	// * (OK)(Finalize) Signing
+	// * (OK)(Deserialize) --none--
+
+	// Initial checksum configuration look up middleware
+	err = stack.Initialize.Add(&setupInputContext{
+		GetAlgorithm: options.GetAlgorithm,
+	}, middleware.Before)
+	if err != nil {
+		return err
+	}
+
+	stack.Build.Remove("ContentChecksum")
+
+	inputChecksum := &computeInputPayloadChecksum{
+		RequireChecksum:                  options.RequireChecksum,
+		EnableTrailingChecksum:           options.EnableTrailingChecksum,
+		EnableComputePayloadHash:         options.EnableComputeSHA256PayloadHash,
+		EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader,
+	}
+	if err := stack.Finalize.Insert(inputChecksum, "ResolveEndpointV2", middleware.After); err != nil {
+		return err
+	}
+
+	// If trailing checksums are not supported, there is no need to add the
+	// finalize handler.
+	if options.EnableTrailingChecksum {
+		trailerMiddleware := &addInputChecksumTrailer{
+			EnableTrailingChecksum:           inputChecksum.EnableTrailingChecksum,
+			RequireChecksum:                  inputChecksum.RequireChecksum,
+			EnableComputePayloadHash:         inputChecksum.EnableComputePayloadHash,
+			EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader,
+		}
+		if err := stack.Finalize.Insert(trailerMiddleware, "Retry", middleware.After); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// RemoveInputMiddleware removes the compute input payload checksum middleware
+// handlers from the stack.
+func RemoveInputMiddleware(stack *middleware.Stack) {
+	id := (*setupInputContext)(nil).ID()
+	stack.Initialize.Remove(id)
+
+	id = (*computeInputPayloadChecksum)(nil).ID()
+	stack.Finalize.Remove(id)
+}
+
+// OutputMiddlewareOptions provides options for configuring output checksum
+// validation middleware.
+type OutputMiddlewareOptions struct {
+	// GetValidationMode is a function to get the checksum validation
+	// mode of the output payload from the input parameters.
+	//
+	// Given the input parameter value, the function must return the validation
+	// mode and true, or false if no mode is specified.
+	GetValidationMode func(interface{}) (string, bool)
+
+	// The set of checksum algorithms that should be used for response payload
+	// checksum validation. The algorithm(s) used will be a union of the
+	// output's returned algorithms and this set.
+	//
+	// Only the first algorithm in the union is currently used.
+	ValidationAlgorithms []string
+
+	// If set, the middleware will ignore output multipart checksums. Otherwise
+	// a checksum format error will be returned by the middleware.
+	IgnoreMultipartValidation bool
+
+	// When set the middleware will log when output does not have a checksum or
+	// algorithm to validate.
+	LogValidationSkipped bool
+
+	// When set the middleware will log when the output contains a multipart
+	// checksum that was skipped and not validated.
+	LogMultipartValidationSkipped bool
+}
+
+// AddOutputMiddleware adds the middleware for validating the response
+// payload's checksum.
+func AddOutputMiddleware(stack *middleware.Stack, options OutputMiddlewareOptions) error {
+	err := stack.Initialize.Add(&setupOutputContext{
+		GetValidationMode: options.GetValidationMode,
+	}, middleware.Before)
+	if err != nil {
+		return err
+	}
+
+	// Resolve a supported priority order list of algorithms to validate.
+	algorithms := FilterSupportedAlgorithms(options.ValidationAlgorithms)
+
+	m := &validateOutputPayloadChecksum{
+		Algorithms:                    algorithms,
+		IgnoreMultipartValidation:     options.IgnoreMultipartValidation,
+		LogMultipartValidationSkipped: options.LogMultipartValidationSkipped,
+		LogValidationSkipped:          options.LogValidationSkipped,
+	}
+
+	return stack.Deserialize.Add(m, middleware.After)
+}
+
+// RemoveOutputMiddleware removes the output payload checksum validation
+// middleware handlers from the stack.
+func RemoveOutputMiddleware(stack *middleware.Stack) {
+	id := (*setupOutputContext)(nil).ID()
+	stack.Initialize.Remove(id)
+
+	id = (*validateOutputPayloadChecksum)(nil).ID()
+	stack.Deserialize.Remove(id)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go
new file mode 100644
index 000000000..7ffca33f0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go
@@ -0,0 +1,482 @@
+package checksum
+
+import (
+	"context"
+	"crypto/sha256"
+	"fmt"
+	"hash"
+	"io"
+	"strconv"
+
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+	presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const (
+	contentMD5Header                           = "Content-Md5"
+	streamingUnsignedPayloadTrailerPayloadHash = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
+)
+
+// computedInputChecksumsKey is the metadata key for recording the algorithm the
+// checksum was computed for and the checksum value.
+type computedInputChecksumsKey struct{}
+
+// GetComputedInputChecksums returns the map of checksum algorithm to their
+// computed value stored in the middleware Metadata. Returns false if no values
+// were stored in the Metadata.
+func GetComputedInputChecksums(m middleware.Metadata) (map[string]string, bool) {
+	vs, ok := m.Get(computedInputChecksumsKey{}).(map[string]string)
+	return vs, ok
+}
+
+// SetComputedInputChecksums stores the map of checksum algorithm to their
+// computed value in the middleware Metadata. Overwrites any values that
+// currently exist in the metadata.
+func SetComputedInputChecksums(m *middleware.Metadata, vs map[string]string) {
+	m.Set(computedInputChecksumsKey{}, vs)
+}
+
+// computeInputPayloadChecksum middleware computes the payload checksum.
+type computeInputPayloadChecksum struct {
+	// Enables support for wrapping the serialized input payload with a
+	// content-encoding: aws-chunked wrapper, and including a trailer for the
+	// algorithm's checksum value.
+	//
+	// The checksum will not be computed, nor added as a trailing checksum, if
+	// the algorithm's header is already set on the request.
+	EnableTrailingChecksum bool
+
+	// States that a checksum is required to be included for the operation. If
+	// the input does not specify a checksum, the built-in MD5 checksum is used
+	// as a fallback.
+	//
+	// Replaces smithy-go's ContentChecksum middleware.
+	RequireChecksum bool
+
+	// Enables support for computing the SHA256 checksum of input payloads
+	// along with the algorithm-specified checksum. Prevents downstream
+	// middleware handlers (computePayloadSHA256) from re-reading the payload.
+	//
+	// The SHA256 payload hash will only be computed for requests that are not
+	// sent over TLS, or that do not enable trailing checksums.
+	//
+	// The SHA256 payload hash will not be computed if the algorithm's header
+	// is already set on the request.
+	EnableComputePayloadHash bool
+
+	// Enables support for setting the aws-chunked decoded content length
+	// header for the decoded length of the underlying stream. Will only be set
+	// when used with trailing checksums, and aws-chunked content-encoding.
+	EnableDecodedContentLengthHeader bool
+
+	useTrailer bool
+}
+
+type useTrailer struct{}
+
+// ID provides the middleware's identifier.
+func (m *computeInputPayloadChecksum) ID() string {
+	return "AWSChecksum:ComputeInputPayloadChecksum"
+}
+
+type computeInputHeaderChecksumError struct {
+	Msg string
+	Err error
+}
+
+func (e computeInputHeaderChecksumError) Error() string {
+	const intro = "compute input header checksum failed"
+
+	if e.Err != nil {
+		return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
+	}
+
+	return fmt.Sprintf("%s, %s", intro, e.Msg)
+}
+func (e computeInputHeaderChecksumError) Unwrap() error { return e.Err }
+
+// HandleFinalize handles computing the payload's checksum, in the following cases:
+//   - Is HTTP, not HTTPS
+//   - RequireChecksum is true, and no checksums were specified via the Input
+//   - Trailing checksums are not supported
+//
+// This handler must be inserted in the stack before ContentPayloadHash
+// and after ComputeContentLength.
+func (m *computeInputPayloadChecksum) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: fmt.Sprintf("unknown request type %T", in.Request),
+		}
+	}
+
+	var algorithm Algorithm
+	var checksum string
+	defer func() {
+		if algorithm == "" || checksum == "" || err != nil {
+			return
+		}
+
+		// Record the checksum and algorithm that was computed
+		SetComputedInputChecksums(&metadata, map[string]string{
+			string(algorithm): checksum,
+		})
+	}()
+
+	// If no algorithm was specified, and the operation requires a checksum,
+	// fall back to the legacy content MD5 checksum.
+	algorithm, ok, err = getInputAlgorithm(ctx)
+	if err != nil {
+		return out, metadata, err
+	} else if !ok {
+		if m.RequireChecksum {
+			checksum, err = setMD5Checksum(ctx, req)
+			if err != nil {
+				return out, metadata, computeInputHeaderChecksumError{
+					Msg: "failed to compute stream's MD5 checksum",
+					Err: err,
+				}
+			}
+			algorithm = Algorithm("MD5")
+		}
+		return next.HandleFinalize(ctx, in)
+	}
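	// (For reference: AlgorithmHTTPHeader is assumed here to map an algorithm
	// to its x-amz-checksum-* header, e.g. CRC32 to "x-amz-checksum-crc32" and
	// SHA256 to "x-amz-checksum-sha256", so the lookup that follows is a plain
	// header presence check.)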
+	// If the checksum header is already set, there is nothing to do.
+	checksumHeader := AlgorithmHTTPHeader(algorithm)
+	if checksum = req.Header.Get(checksumHeader); checksum != "" {
+		return next.HandleFinalize(ctx, in)
+	}
+
+	computePayloadHash := m.EnableComputePayloadHash
+	if v := v4.GetPayloadHash(ctx); v != "" {
+		computePayloadHash = false
+	}
+
+	stream := req.GetStream()
+	streamLength, err := getRequestStreamLength(req)
+	if err != nil {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: "failed to determine stream length",
+			Err: err,
+		}
+	}
+
+	// If trailing checksums are supported, the request is HTTPS, and the
+	// stream is not nil or empty, switch to a trailing checksum instead.
+	//
+	// Nil and empty streams will always be handled as a request header,
+	// regardless of whether the operation supports trailing checksums.
+	if req.IsHTTPS() && !presignedurlcust.GetIsPresigning(ctx) {
+		if stream != nil && streamLength != 0 && m.EnableTrailingChecksum {
+			if m.EnableComputePayloadHash {
+				// ContentSHA256Header middleware handles the header
+				ctx = v4.SetPayloadHash(ctx, streamingUnsignedPayloadTrailerPayloadHash)
+			}
+			m.useTrailer = true
+			ctx = middleware.WithStackValue(ctx, useTrailer{}, true)
+			return next.HandleFinalize(ctx, in)
+		}
+
+		// If trailing checksums are not enabled but the protocol is still
+		// HTTPS, disable computing the payload hash. The downstream middleware
+		// handler (ComputePayloadHash) will set the payload hash to unsigned
+		// payload, if signing was used.
+		computePayloadHash = false
+	}
+
+	// Only seekable streams are supported for non-trailing checksums, because
+	// the stream needs to be rewound before the handler can continue.
+	if stream != nil && !req.IsStreamSeekable() {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: "unseekable stream is not supported without TLS and trailing checksum",
+		}
+	}
+
+	var sha256Checksum string
+	checksum, sha256Checksum, err = computeStreamChecksum(
+		algorithm, stream, computePayloadHash)
+	if err != nil {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: "failed to compute stream checksum",
+			Err: err,
+		}
+	}
+
+	if err := req.RewindStream(); err != nil {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: "failed to rewind stream",
+			Err: err,
+		}
+	}
+
+	req.Header.Set(checksumHeader, checksum)
+
+	if computePayloadHash {
+		ctx = v4.SetPayloadHash(ctx, sha256Checksum)
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
+
+type computeInputTrailingChecksumError struct {
+	Msg string
+	Err error
+}
+
+func (e computeInputTrailingChecksumError) Error() string {
+	const intro = "compute input trailing checksum failed"
+
+	if e.Err != nil {
+		return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
+	}
+
+	return fmt.Sprintf("%s, %s", intro, e.Msg)
+}
+func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err }
+
+// addInputChecksumTrailer adds a trailing checksum to the request, when:
+//   - The request is HTTPS, not HTTP
+//   - A checksum was specified via the Input
+//   - Trailing checksums are supported
+type addInputChecksumTrailer struct {
+	EnableTrailingChecksum           bool
+	RequireChecksum                  bool
+	EnableComputePayloadHash         bool
+	EnableDecodedContentLengthHeader bool
+}
+
+// ID identifies this middleware.
+func (*addInputChecksumTrailer) ID() string {
+	return "addInputChecksumTrailer"
+}
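Before the handler itself, it may help to see the body shape this wrapping produces. Under the aws-chunked encoding with an assumed CRC32 trailer, the body is sent roughly as below, alongside headers such as Content-Encoding: aws-chunked, X-Amz-Decoded-Content-Length (the raw stream length), and X-Amz-Trailer: x-amz-checksum-crc32; chunk sizes are hex and the trailer value is the base64 checksum:

	400\r\n
	<first 1024 payload bytes>\r\n
	...
	0\r\n
	x-amz-checksum-crc32:<base64 checksum>\r\n
	\r\n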
+// HandleFinalize wraps the request body to write the trailing checksum.
+func (m *addInputChecksumTrailer) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if enabled, _ := middleware.GetStackValue(ctx, useTrailer{}).(bool); !enabled {
+		return next.HandleFinalize(ctx, in)
+	}
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: fmt.Sprintf("unknown request type %T", in.Request),
+		}
+	}
+
+	// Trailing checksums are only supported when TLS is enabled.
+	if !req.IsHTTPS() {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "HTTPS required",
+		}
+	}
+
+	// If no algorithm was specified, there is nothing to do.
+	algorithm, ok, err := getInputAlgorithm(ctx)
+	if err != nil {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "failed to get algorithm",
+			Err: err,
+		}
+	} else if !ok {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "no algorithm specified",
+		}
+	}
+
+	// If the checksum header is already set before finalize could run, there
+	// is nothing to do.
+	checksumHeader := AlgorithmHTTPHeader(algorithm)
+	if req.Header.Get(checksumHeader) != "" {
+		return next.HandleFinalize(ctx, in)
+	}
+
+	stream := req.GetStream()
+	streamLength, err := getRequestStreamLength(req)
+	if err != nil {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "failed to determine stream length",
+			Err: err,
+		}
+	}
+
+	if stream == nil || streamLength == 0 {
+		// Nil and empty streams are handled by the header-based handler. They
+		// are not supported by the trailing checksums finalize handler. There
+		// is no benefit to sending them as trailers compared to headers.
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "nil or empty streams are not supported",
+		}
+	}
+
+	checksumReader, err := newComputeChecksumReader(stream, algorithm)
+	if err != nil {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "failed to create checksum reader",
+			Err: err,
+		}
+	}
+
+	awsChunkedReader := newUnsignedAWSChunkedEncoding(checksumReader,
+		func(o *awsChunkedEncodingOptions) {
+			o.Trailers[AlgorithmHTTPHeader(checksumReader.Algorithm())] = awsChunkedTrailerValue{
+				Get:    checksumReader.Base64Checksum,
+				Length: checksumReader.Base64ChecksumLength(),
+			}
+			o.StreamLength = streamLength
+		})
+
+	for key, values := range awsChunkedReader.HTTPHeaders() {
+		for _, value := range values {
+			req.Header.Add(key, value)
+		}
+	}
+
+	// Setting the stream on the request will create a copy. The content length
+	// is not updated until after the request is copied to prevent impacting
+	// upstream middleware.
+	req, err = req.SetStream(awsChunkedReader)
+	if err != nil {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "failed updating request to trailing checksum wrapped stream",
+			Err: err,
+		}
+	}
+	req.ContentLength = awsChunkedReader.EncodedLength()
+	in.Request = req
+
+	// Add the decoded content length header if the original stream's content
+	// length is known.
+	if streamLength != -1 && m.EnableDecodedContentLengthHeader {
+		req.Header.Set(decodedContentLengthHeaderName, strconv.FormatInt(streamLength, 10))
+	}
+
+	out, metadata, err = next.HandleFinalize(ctx, in)
+	if err == nil {
+		checksum, err := checksumReader.Base64Checksum()
+		if err != nil {
+			return out, metadata, fmt.Errorf("failed to get computed checksum, %w", err)
+		}
+
+		// Record the checksum and algorithm that was computed
+		SetComputedInputChecksums(&metadata, map[string]string{
+			string(algorithm): checksum,
+		})
+	}
+
+	return out, metadata, err
+}
+
+func getInputAlgorithm(ctx context.Context) (Algorithm, bool, error) {
+	ctxAlgorithm := internalcontext.GetChecksumInputAlgorithm(ctx)
+	if ctxAlgorithm == "" {
+		return "", false, nil
+	}
+
+	algorithm, err := ParseAlgorithm(ctxAlgorithm)
+	if err != nil {
+		return "", false, fmt.Errorf(
+			"failed to parse algorithm, %w", err)
+	}
+
+	return algorithm, true, nil
+}
+
+func computeStreamChecksum(algorithm Algorithm, stream io.Reader, computePayloadHash bool) (
+	checksum string, sha256Checksum string, err error,
+) {
+	hasher, err := NewAlgorithmHash(algorithm)
+	if err != nil {
+		return "", "", fmt.Errorf(
+			"failed to get hasher for checksum algorithm, %w", err)
+	}
+
+	var sha256Hasher hash.Hash
+	var batchHasher io.Writer = hasher
+
+	// Compute the payload hash for the protocol. To prevent another handler
+	// (computePayloadSHA256) from re-reading the body, also compute the SHA256
+	// for request signing. If the configured checksum algorithm is SHA256,
+	// don't double wrap the stream with another SHA256 hasher.
+	if computePayloadHash && algorithm != AlgorithmSHA256 {
+		sha256Hasher = sha256.New()
+		batchHasher = io.MultiWriter(hasher, sha256Hasher)
+	}
+
+	if stream != nil {
+		if _, err = io.Copy(batchHasher, stream); err != nil {
+			return "", "", fmt.Errorf(
+				"failed to read stream to compute hash, %w", err)
+		}
+	}
+
+	checksum = string(base64EncodeHashSum(hasher))
+	if computePayloadHash {
+		if algorithm != AlgorithmSHA256 {
+			sha256Checksum = string(hexEncodeHashSum(sha256Hasher))
+		} else {
+			sha256Checksum = string(hexEncodeHashSum(hasher))
+		}
+	}
+
+	return checksum, sha256Checksum, nil
+}
+
+func getRequestStreamLength(req *smithyhttp.Request) (int64, error) {
+	if v := req.ContentLength; v > 0 {
+		return v, nil
+	}
+
+	if length, ok, err := req.StreamLength(); err != nil {
+		return 0, fmt.Errorf("failed getting request stream's length, %w", err)
+	} else if ok {
+		return length, nil
+	}
+
+	return -1, nil
+}
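As a concrete check on what computeStreamChecksum returns, the CRC32 case amounts to base64-encoding the raw digest bytes; a self-contained sketch using only the standard library:

	package main

	import (
		"encoding/base64"
		"fmt"
		"hash/crc32"
		"io"
		"strings"
	)

	func main() {
		// Hash the stream, then base64-encode the raw 4-byte digest, as the
		// middleware does for the x-amz-checksum-crc32 value.
		h := crc32.NewIEEE()
		if _, err := io.Copy(h, strings.NewReader("hello world")); err != nil {
			panic(err)
		}
		// CRC32 IEEE of "hello world" is 0x0d4a1185.
		fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil))) // DUoRhQ==
	}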
+// setMD5Checksum computes the MD5 of the request payload and sets it to the
+// Content-MD5 header. It returns the base64 encoded MD5 string, or an error.
+//
+// If the MD5 is already set as the Content-MD5 header, that value will be
+// returned, and nothing else will be done.
+//
+// If the payload is empty, no MD5 will be computed. No error will be returned.
+// Empty payloads do not have an MD5 value.
+//
+// Replaces the smithy-go middleware for the httpChecksum trait.
+func setMD5Checksum(ctx context.Context, req *smithyhttp.Request) (string, error) {
+	if v := req.Header.Get(contentMD5Header); len(v) != 0 {
+		return v, nil
+	}
+	stream := req.GetStream()
+	if stream == nil {
+		return "", nil
+	}
+
+	if !req.IsStreamSeekable() {
+		return "", fmt.Errorf(
+			"unseekable stream is not supported for computing md5 checksum")
+	}
+
+	v, err := computeMD5Checksum(stream)
+	if err != nil {
+		return "", err
+	}
+	if err := req.RewindStream(); err != nil {
+		return "", fmt.Errorf("failed to rewind stream after computing MD5 checksum, %w", err)
+	}
+	// set the 'Content-MD5' header
+	req.Header.Set(contentMD5Header, string(v))
+	return string(v), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go
new file mode 100644
index 000000000..3db73afe7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go
@@ -0,0 +1,98 @@
+package checksum
+
+import (
+	"context"
+
+	internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// setupInputContext is the initial middleware that looks up the input
+// used to configure checksum behavior. This middleware must be executed before
+// the input validation step or any other checksum middleware.
+type setupInputContext struct {
+	// GetAlgorithm is a function to get the checksum algorithm of the
+	// input payload from the input parameters.
+	//
+	// Given the input parameter value, the function must return the algorithm
+	// and true, or false if no algorithm is specified.
+	GetAlgorithm func(interface{}) (string, bool)
+}
+
+// ID for the middleware
+func (m *setupInputContext) ID() string {
+	return "AWSChecksum:SetupInputContext"
+}
+
+// HandleInitialize is the initialization middleware that sets up the checksum
+// context based on the input parameters provided in the stack.
+func (m *setupInputContext) HandleInitialize(
+	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// Check if a checksum algorithm is specified.
+	if m.GetAlgorithm != nil {
+		// check if the input resource has a checksum algorithm
+		algorithm, ok := m.GetAlgorithm(in.Parameters)
+		if ok && len(algorithm) != 0 {
+			ctx = internalcontext.SetChecksumInputAlgorithm(ctx, algorithm)
+		}
+	}
+
+	return next.HandleInitialize(ctx, in)
+}
+
+type setupOutputContext struct {
+	// GetValidationMode is a function to get the checksum validation
+	// mode of the output payload from the input parameters.
+	//
+	// Given the input parameter value, the function must return the validation
+	// mode and true, or false if no mode is specified.
+	GetValidationMode func(interface{}) (string, bool)
+}
+
+// ID for the middleware
+func (m *setupOutputContext) ID() string {
+	return "AWSChecksum:SetupOutputContext"
+}
+
+// HandleInitialize is the initialization middleware that sets up the checksum
+// context based on the input parameters provided in the stack.
+func (m *setupOutputContext) HandleInitialize(
+	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// Check if a validation mode is specified.
+	if m.GetValidationMode != nil {
+		// check if the input resource has a validation mode
+		mode, ok := m.GetValidationMode(in.Parameters)
+		if ok && len(mode) != 0 {
+			ctx = setContextOutputValidationMode(ctx, mode)
+		}
+	}
+
+	return next.HandleInitialize(ctx, in)
+}
+
+// outputValidationModeKey is the key set on context used to identify if
+// output checksum validation is enabled.
+type outputValidationModeKey struct{}
+
+// setContextOutputValidationMode sets the response checksum
+// validation mode on the context.
+//
+// Scoped to stack values.
+func setContextOutputValidationMode(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, outputValidationModeKey{}, value)
+}
+
+// getContextOutputValidationMode returns the response checksum validation
+// state, if one was specified. An empty string is returned if one is not
+// specified.
+//
+// Scoped to stack values.
+func getContextOutputValidationMode(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, outputValidationModeKey{}).(string)
+	return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go
new file mode 100644
index 000000000..9fde12d86
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go
@@ -0,0 +1,131 @@
+package checksum
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// outputValidationAlgorithmsUsedKey is the metadata key for indexing the
+// algorithms that were used by the middleware's validation.
+type outputValidationAlgorithmsUsedKey struct{}
+
+// GetOutputValidationAlgorithmsUsed returns the checksum algorithms used
+// stored in the middleware Metadata. Returns false if no algorithms were
+// stored in the Metadata.
+func GetOutputValidationAlgorithmsUsed(m middleware.Metadata) ([]string, bool) {
+	vs, ok := m.Get(outputValidationAlgorithmsUsedKey{}).([]string)
+	return vs, ok
+}
+
+// SetOutputValidationAlgorithmsUsed stores the checksum algorithms used in the
+// middleware Metadata.
+func SetOutputValidationAlgorithmsUsed(m *middleware.Metadata, vs []string) {
+	m.Set(outputValidationAlgorithmsUsedKey{}, vs)
+}
+
+// validateOutputPayloadChecksum middleware computes the payload checksum of
+// the received response and validates it against the checksum returned by the
+// service.
+type validateOutputPayloadChecksum struct {
+	// Algorithms represents a priority-ordered list of valid checksum
+	// algorithms that should be validated when present in HTTP response
+	// headers.
+	Algorithms []Algorithm
+
+	// IgnoreMultipartValidation indicates multipart checksums ending with "-#"
+	// will be ignored.
+	IgnoreMultipartValidation bool
+
+	// When set the middleware will log when output does not have a checksum or
+	// algorithm to validate.
+	LogValidationSkipped bool
+
+	// When set the middleware will log when the output contains a multipart
+	// checksum that was skipped and not validated.
+	LogMultipartValidationSkipped bool
+}
+
+func (m *validateOutputPayloadChecksum) ID() string {
+	return "AWSChecksum:ValidateOutputPayloadChecksum"
+}
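From the caller's perspective this validation is opted into per request; a sketch against the public S3 client surface (the field and constant names are the s3 and types packages' public API, assumed here rather than defined in this file):

	// Sketch: requesting response checksum validation on GetObject.
	// GetValidationMode reads the ChecksumMode member; "ENABLED" routes the
	// response body through the validation reader added below.
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:       aws.String("amzn-s3-demo-bucket"),
		Key:          aws.String("object-key"),
		ChecksumMode: types.ChecksumModeEnabled,
	})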
+// HandleDeserialize is a Deserialize middleware that wraps the HTTP response
+// body with an io.ReadCloser that will validate its checksum.
+func (m *validateOutputPayloadChecksum) HandleDeserialize(
+	ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	// If validation mode is not enabled, there is nothing to validate.
+	if mode := getContextOutputValidationMode(ctx); mode != "ENABLED" {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("unknown transport type %T", out.RawResponse),
+		}
+	}
+
+	var expectedChecksum string
+	var algorithmToUse Algorithm
+	for _, algorithm := range m.Algorithms {
+		value := response.Header.Get(AlgorithmHTTPHeader(algorithm))
+		if len(value) == 0 {
+			continue
+		}
+
+		expectedChecksum = value
+		algorithmToUse = algorithm
+	}
+
+	// TODO this must validate the validation mode is set to enabled.
+
+	logger := middleware.GetLogger(ctx)
+
+	// Skip validation if no checksum algorithm or checksum is available.
+	if len(expectedChecksum) == 0 || len(algorithmToUse) == 0 {
+		if m.LogValidationSkipped {
+			// TODO this probably should have more information about the
+			// operation output that won't be validated.
+			logger.Logf(logging.Warn,
+				"Response has no supported checksum. Not validating response payload.")
+		}
+		return out, metadata, nil
+	}
+
+	// Ignore multipart validation
+	if m.IgnoreMultipartValidation && strings.Contains(expectedChecksum, "-") {
+		if m.LogMultipartValidationSkipped {
+			// TODO this probably should have more information about the
+			// operation output that won't be validated.
+			logger.Logf(logging.Warn, "Skipped validation of multipart checksum.")
+		}
+		return out, metadata, nil
+	}
+
+	body, err := newValidateChecksumReader(response.Body, algorithmToUse, expectedChecksum)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to create checksum validation reader, %w", err)
+	}
+	response.Body = body
+
+	// Update the metadata to include the set of the checksum algorithms that
+	// will be validated.
+	SetOutputValidationAlgorithmsUsed(&metadata, []string{
+		string(algorithmToUse),
+	})
+
+	return out, metadata, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
new file mode 100644
index 000000000..150e26f4e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
@@ -0,0 +1,308 @@
+# v1.17.5 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2024-03-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.8 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.6 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.5 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.4 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.5 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.4 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.3 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.2 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.1 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2023-03-21) + +* **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.21 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.19 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.18 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.17 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.16 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to 
the latest SDK module versions + +# v1.10.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-09-02) + +* **Feature**: Add support for S3 Multi-Region Access Point ARNs. + +# v1.6.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-04) + +* **Feature**: The handling of AccessPoint and Outpost ARNs have been updated. + +# v1.3.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go
new file mode 100644
index 000000000..ec290b213
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go
@@ -0,0 +1,53 @@
+package arn
+
+import (
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+// AccessPointARN provides the representation of an S3 access point ARN.
+type AccessPointARN struct {
+	arn.ARN
+	AccessPointName string
+}
+
+// GetARN returns the base ARN for the Access Point resource
+func (a AccessPointARN) GetARN() arn.ARN {
+	return a.ARN
+}
+
+// ParseAccessPointResource attempts to parse the ARN's resource as an
+// AccessPoint resource.
+//
+// Supported Access point resource format:
+//   - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName}
+//   - example: arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint
+func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
+	if isFIPS(a.Region) {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"}
+	}
+	if len(a.AccountID) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"}
+	}
+	if len(resParts) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+	}
+	if len(resParts) > 1 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+	}
+
+	resID := resParts[0]
+	if len(strings.TrimSpace(resID)) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+	}
+
+	return AccessPointARN{
+		ARN:             a,
+		AccessPointName: resID,
+	}, nil
+}
+
+func isFIPS(region string) bool {
+	return strings.HasPrefix(region, "fips-") || strings.HasSuffix(region, "-fips")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go
new file mode 100644
index 000000000..06e1a3add
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go
@@ -0,0 +1,85 @@
+package arn
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+var supportedServiceARN = []string{
+	"s3",
+	"s3-outposts",
+	"s3-object-lambda",
+}
+
+func isSupportedServiceARN(service string) bool {
+	for _, name := range supportedServiceARN {
+		if name == service {
+			return true
+		}
+	}
+	return false
+}
+
+// Resource provides the interface abstracting ARNs of specific resource
+// types.
+type Resource interface {
+	GetARN() arn.ARN
+	String() string
+}
+
+// ResourceParser provides the function for parsing an ARN's resource
+// component into a typed resource.
+type ResourceParser func(arn.ARN) (Resource, error)
+
+// ParseResource parses an AWS ARN into a typed resource for the S3 API.
+func ParseResource(a arn.ARN, resParser ResourceParser) (resARN Resource, err error) {
+	if len(a.Partition) == 0 {
+		return nil, InvalidARNError{ARN: a, Reason: "partition not set"}
+	}
+
+	if !isSupportedServiceARN(a.Service) {
+		return nil, InvalidARNError{ARN: a, Reason: "service is not supported"}
+	}
+
+	if len(a.Resource) == 0 {
+		return nil, InvalidARNError{ARN: a, Reason: "resource not set"}
+	}
+
+	return resParser(a)
+}
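A short worked example of these helpers together, with SplitResource defined just below; the awsarn alias stands for github.com/aws/aws-sdk-go-v2/aws/arn, and error handling is elided:

	// Sketch: parsing an S3 access point ARN with this package's helpers.
	a, _ := awsarn.Parse("arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint")
	parts := SplitResource(a.Resource) // ["accesspoint", "myaccesspoint"]
	ap, err := ParseAccessPointResource(a, parts[1:])
	// on success: ap.AccessPointName == "myaccesspoint"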
+// SplitResource splits the resource components by the ARN resource delimiters.
+func SplitResource(v string) []string {
+	var parts []string
+	var offset int
+
+	for offset <= len(v) {
+		idx := strings.IndexAny(v[offset:], "/:")
+		if idx < 0 {
+			parts = append(parts, v[offset:])
+			break
+		}
+		parts = append(parts, v[offset:idx+offset])
+		offset += idx + 1
+	}
+
+	return parts
+}
+
+// IsARN returns whether the given string is an ARN
+func IsARN(s string) bool {
+	return arn.IsARN(s)
+}
+
+// InvalidARNError provides the error for an invalid ARN error.
+type InvalidARNError struct {
+	ARN    arn.ARN
+	Reason string
+}
+
+// Error returns a string describing the InvalidARNError
+func (e InvalidARNError) Error() string {
+	return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go
new file mode 100644
index 000000000..9a3258e15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go
@@ -0,0 +1,32 @@
+package arn
+
+import "fmt"
+
+// arnable is implemented by the relevant S3/S3Control
+// operations which have members that may need ARN
+// processing.
+type arnable interface {
+	SetARNMember(string) error
+	GetARNMember() (*string, bool)
+}
+
+// GetARNField would be called during middleware execution
+// to retrieve a member value that is an ARN in need of
+// processing.
+func GetARNField(input interface{}) (*string, bool) {
+	v, ok := input.(arnable)
+	if !ok {
+		return nil, false
+	}
+	return v.GetARNMember()
+}
+
+// SetARNField would be called during middleware execution
+// to set a member value that required ARN processing.
+func SetARNField(input interface{}, v string) error {
+	params, ok := input.(arnable)
+	if !ok {
+		return fmt.Errorf("params does not contain an ARN field member")
+	}
+	return params.SetARNMember(v)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go
new file mode 100644
index 000000000..e06a30285
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go
@@ -0,0 +1,128 @@
+package arn
+
+import (
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+// OutpostARN interface that should be satisfied by outpost ARNs
+type OutpostARN interface {
+	Resource
+	GetOutpostID() string
+}
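To make the resource-part indexing in the parser below concrete, here is how an outpost access point ARN decomposes (a sketch; the caller is expected to strip the leading "outpost" part before calling):

	// For arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
	// the ARN resource component is:
	//   "outpost/op-1234567890123456/accesspoint/myaccesspoint"
	// SplitResource yields:
	//   ["outpost", "op-1234567890123456", "accesspoint", "myaccesspoint"]
	// so inside ParseOutpostARNResource, given everything after "outpost":
	//   resParts[0] == "op-1234567890123456"  // the outpost ID
	//   resParts[1] == "accesspoint"          // the resource type switch
	//   resParts[2:] == ["myaccesspoint"]     // handed to ParseAccessPointResource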
+// ParseOutpostARNResource will parse a provided ARN's resource using the
+// appropriate ARN format and return a specific OutpostARN type.
+//
+// Currently supported outpost ARN formats:
+// * Outpost AccessPoint ARN format:
+//   - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
+//   - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
+//
+// * Outpost Bucket ARN format:
+//   - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName}
+//   - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket
+//
+// Other outpost ARN formats may be supported and added in the future.
+func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) {
+	if len(a.Region) == 0 {
+		return nil, InvalidARNError{ARN: a, Reason: "region not set"}
+	}
+
+	if isFIPS(a.Region) {
+		return nil, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"}
+	}
+
+	if len(a.AccountID) == 0 {
+		return nil, InvalidARNError{ARN: a, Reason: "account-id not set"}
+	}
+
+	// verify the outpost id is present and valid
+	if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 {
+		return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
+	}
+
+	// verify a possible resource type exists
+	if len(resParts) < 3 {
+		return nil, InvalidARNError{
+			ARN: a, Reason: "incomplete outpost resource type. Expected bucket or access-point resource to be present",
+		}
+	}
+
+	// Since we know this is an outpost ARN, fetch the outpost ID
+	outpostID := strings.TrimSpace(resParts[0])
+
+	switch resParts[1] {
+	case "accesspoint":
+		accesspointARN, err := ParseAccessPointResource(a, resParts[2:])
+		if err != nil {
+			return OutpostAccessPointARN{}, err
+		}
+		return OutpostAccessPointARN{
+			AccessPointARN: accesspointARN,
+			OutpostID:      outpostID,
+		}, nil
+
+	case "bucket":
+		bucketName, err := parseBucketResource(a, resParts[2:])
+		if err != nil {
+			return nil, err
+		}
+		return OutpostBucketARN{
+			ARN:        a,
+			BucketName: bucketName,
+			OutpostID:  outpostID,
+		}, nil
+
+	default:
+		return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"}
+	}
+}
+
+// OutpostAccessPointARN represents an outpost access point ARN.
+type OutpostAccessPointARN struct {
+	AccessPointARN
+	OutpostID string
+}
+
+// GetOutpostID returns the outpost ID of the outpost access point ARN
+func (o OutpostAccessPointARN) GetOutpostID() string {
+	return o.OutpostID
+}
+
+// OutpostBucketARN represents the outpost bucket ARN.
+type OutpostBucketARN struct {
+	arn.ARN
+	BucketName string
+	OutpostID  string
+}
+
+// GetOutpostID returns the outpost ID of the outpost bucket ARN
+func (o OutpostBucketARN) GetOutpostID() string {
+	return o.OutpostID
+}
+
+// GetARN retrieves the base ARN from the outpost bucket ARN resource
+func (o OutpostBucketARN) GetARN() arn.ARN {
+	return o.ARN
+}
+
+// parseBucketResource attempts to parse the ARN's bucket resource and retrieve
+// the bucket resource id.
+//
+// parseBucketResource only parses the bucket resource id.
+func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) {
+	if len(resParts) == 0 {
+		return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+	}
+	if len(resParts) > 1 {
+		return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+	}
+
+	bucketName = strings.TrimSpace(resParts[0])
+	if len(bucketName) == 0 {
+		return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+	}
+	return bucketName, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go
new file mode 100644
index 000000000..513154cc0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go
@@ -0,0 +1,15 @@
+package arn
+
+// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service
+type S3ObjectLambdaARN interface {
+	Resource
+
+	isS3ObjectLambdasARN()
+}
+
+// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type
+type S3ObjectLambdaAccessPointARN struct {
+	AccessPointARN
+}
+
+func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go
new file mode 100644
index 000000000..b51532085
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go
@@ -0,0 +1,73 @@
+package s3shared
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+
+	"github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+// ARNLookup is the initial middleware that looks up if an ARN is provided.
+// This middleware is responsible for fetching the ARN from an arnable field, and
+// registering the ARN on the middleware context. This middleware must be executed
+// before the input validation step or any other ARN processing middleware.
+type ARNLookup struct {
+
+	// GetARNValue takes in an input interface and returns a ptr to string and a bool
+	GetARNValue func(interface{}) (*string, bool)
+}
+
+// ID for the middleware
+func (m *ARNLookup) ID() string {
+	return "S3Shared:ARNLookup"
+}
+
+// HandleInitialize handles the behavior of this initialize step
+func (m *ARNLookup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// check if GetARNValue is supported
+	if m.GetARNValue == nil {
+		return next.HandleInitialize(ctx, in)
+	}
+
+	// check if the input resource is an ARN; if not, go to next
+	v, ok := m.GetARNValue(in.Parameters)
+	if !ok || v == nil || !arn.IsARN(*v) {
+		return next.HandleInitialize(ctx, in)
+	}
+
+	// if it is an ARN, parse it and put it on the context
+	av, err := arn.Parse(*v)
+	if err != nil {
+		return out, metadata, fmt.Errorf("error parsing arn: %w", err)
+	}
+	// set parsed arn on context
+	ctx = setARNResourceOnContext(ctx, av)
+
+	return next.HandleInitialize(ctx, in)
+}
+
+// arnResourceKey is the key set on context used to identify and retrieve an
+// ARN resource, if present on the context.
+type arnResourceKey struct{}
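Combined with arn.GetARNField from arn_member.go, whose signature matches GetARNValue, client-side registration reduces to a sketch like this (stack and imports assumed from the surrounding smithy middleware setup):

	// Sketch: registering the ARN lookup so any arnable input has its ARN
	// parsed onto the context before validation runs.
	err := stack.Initialize.Add(&s3shared.ARNLookup{
		GetARNValue: arn.GetARNField, // func(interface{}) (*string, bool)
	}, middleware.Before)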
+func setARNResourceOnContext(ctx context.Context, value arn.ARN) context.Context { + return middleware.WithStackValue(ctx, arnResourceKey{}, value) +} + +// GetARNResourceFromContext returns an ARN from context and a bool indicating +// presence of ARN on ctx. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetARNResourceFromContext(ctx context.Context) (arn.ARN, bool) { + v, ok := middleware.GetStackValue(ctx, arnResourceKey{}).(arn.ARN) + return v, ok +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go new file mode 100644 index 000000000..b5d31f5c5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go @@ -0,0 +1,41 @@ +package config + +import "context" + +// UseARNRegionProvider is an interface for retrieving external configuration value for UseARNRegion +type UseARNRegionProvider interface { + GetS3UseARNRegion(ctx context.Context) (value bool, found bool, err error) +} + +// DisableMultiRegionAccessPointsProvider is an interface for retrieving external configuration value for DisableMultiRegionAccessPoints +type DisableMultiRegionAccessPointsProvider interface { + GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value bool, found bool, err error) +} + +// ResolveUseARNRegion extracts the first instance of a UseARNRegion from the config slice. +// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. +func ResolveUseARNRegion(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(UseARNRegionProvider); ok { + value, found, err = p.GetS3UseARNRegion(ctx) + if err != nil || found { + break + } + } + } + return +} + +// ResolveDisableMultiRegionAccessPoints extracts the first instance of a DisableMultiRegionAccessPoints from the config slice. +// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. 
+func ResolveDisableMultiRegionAccessPoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(DisableMultiRegionAccessPointsProvider); ok {
+			value, found, err = p.GetS3DisableMultiRegionAccessPoints(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go
new file mode 100644
index 000000000..aa0c3714e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go
@@ -0,0 +1,183 @@
+package s3shared
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+)
+
+// TODO: fix these error statements to be relevant to v2 sdk
+
+const (
+	invalidARNErrorErrCode    = "InvalidARNError"
+	configurationErrorErrCode = "ConfigurationError"
+)
+
+// InvalidARNError denotes the error for an invalid ARN
+type InvalidARNError struct {
+	message  string
+	resource arn.Resource
+	origErr  error
+}
+
+// Error returns the InvalidARN error string
+func (e InvalidARNError) Error() string {
+	var extra string
+	if e.resource != nil {
+		extra = "ARN: " + e.resource.String()
+	}
+	msg := invalidARNErrorErrCode + " : " + e.message
+	if extra != "" {
+		msg = msg + "\n\t" + extra
+	}
+
+	return msg
+}
+
+// Unwrap returns the original error wrapped by the InvalidARNError
+func (e InvalidARNError) Unwrap() error {
+	return e.origErr
+}
+
+// NewInvalidARNError denotes an invalid arn error
+func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError {
+	return InvalidARNError{
+		message:  "invalid ARN",
+		origErr:  err,
+		resource: resource,
+	}
+}
+
+// NewInvalidARNWithUnsupportedPartitionError denotes an ARN not supported for the target partition
+func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError {
+	return InvalidARNError{
+		message:  "resource ARN not supported for the target ARN partition",
+		origErr:  err,
+		resource: resource,
+	}
+}
+
+// NewInvalidARNWithFIPSError denotes an ARN not supported for a FIPS region
+//
+// Deprecated: FIPS will not appear in the ARN region component.
+func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError {
+	return InvalidARNError{
+		message:  "resource ARN not supported for FIPS region",
+		resource: resource,
+		origErr:  err,
+	}
+}
+
+// ConfigurationError is used to denote a client configuration error
+type ConfigurationError struct {
+	message           string
+	resource          arn.Resource
+	clientPartitionID string
+	clientRegion      string
+	origErr           error
+}
+
+// Error returns the Configuration error string
+func (e ConfigurationError) Error() string {
+	extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
+		e.resource, e.clientPartitionID, e.clientRegion)
+
+	msg := configurationErrorErrCode + " : " + e.message
+	if extra != "" {
+		msg = msg + "\n\t" + extra
+	}
+	return msg
+}
+
+// Unwrap returns the original error wrapped by the ConfigurationError
+func (e ConfigurationError) Unwrap() error {
+	return e.origErr
+}
+
+// NewClientPartitionMismatchError denotes a client partition mismatch error
+func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client partition does not match provided ARN partition",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientRegionMismatchError denotes a cross region access error
+func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client region does not match provided ARN region",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewFailedToResolveEndpointError denotes an endpoint resolving error
+func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "endpoint resolver failed to find an endpoint for the provided ARN region",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForFIPSError denotes a client config error for unsupported cross region FIPS access
+func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for fips but cross-region resource ARN provided",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewFIPSConfigurationError denotes a configuration error when a client or request is configured for FIPS
+func NewFIPSConfigurationError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "use of ARN is not supported when client or request is configured for FIPS",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForAccelerateError denotes a client config error for unsupported S3 Accelerate
+func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for S3 Accelerate but is not supported with resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForCrossRegionFIPSError denotes a client config error for an unsupported cross region FIPS request
+func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for FIPS with cross-region enabled but is not supported with cross-region resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForDualStackError denotes a client config error for unsupported S3 Dual-stack
+func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for S3 Dual-stack but is not supported with resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
new file mode 100644
index 000000000..a1f30ee06
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package s3shared
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.17.5"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go
new file mode 100644
index 000000000..85b60d2a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go
@@ -0,0 +1,29 @@
+package s3shared
+
+import (
+	"github.com/aws/smithy-go/middleware"
+)
+
+// hostID is used to retrieve the host id from response metadata
+type hostID struct {
+}
+
+// SetHostIDMetadata sets the provided host id on middleware metadata
+func SetHostIDMetadata(metadata *middleware.Metadata, id string) {
+	metadata.Set(hostID{}, id)
+}
+
+// GetHostIDMetadata retrieves the host id from middleware metadata and
+// returns the host id as a string along with a boolean indicating presence of
+// the hostId on middleware metadata.
+func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) {
+	if !metadata.Has(hostID{}) {
+		return "", false
+	}
+
+	v, ok := metadata.Get(hostID{}).(string)
+	if !ok {
+		return "", true
+	}
+	return v, true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go
new file mode 100644
index 000000000..f02604cb6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go
@@ -0,0 +1,28 @@
+package s3shared
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// clonedInputKey used to denote if request input was cloned.
+type clonedInputKey struct{}
+
+// SetClonedInputKey sets a key on context to denote input was cloned previously.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetClonedInputKey(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, clonedInputKey{}, value)
+}
+
+// IsClonedInput retrieves if context key for cloned input was set.
+// If set, we can infer that the request input was cloned previously.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func IsClonedInput(ctx context.Context) bool {
+	v, _ := middleware.GetStackValue(ctx, clonedInputKey{}).(bool)
+	return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go
new file mode 100644
index 000000000..f52f2f11e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go
@@ -0,0 +1,52 @@
+package s3shared
+
+import (
+	"context"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const metadataRetrieverID = "S3MetadataRetriever"
+
+// AddMetadataRetrieverMiddleware adds request id, host id retriever middleware
+func AddMetadataRetrieverMiddleware(stack *middleware.Stack) error {
+	// add metadata retriever middleware before operation deserializers so that it can retrieve metadata such as
+	// host id, request id from response header returned by operation deserializers
+	return stack.Deserialize.Insert(&metadataRetriever{}, "OperationDeserializer", middleware.Before)
+}
+
+type metadataRetriever struct {
+}
+
+// ID returns the middleware identifier
+func (m *metadataRetriever) ID() string {
+	return metadataRetrieverID
+}
+
+func (m *metadataRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		// No raw response to wrap with.
+		return out, metadata, err
+	}
+
+	// check for the Request id header
+	if v := resp.Header.Get("X-Amz-Request-Id"); len(v) != 0 {
+		// set reqID on metadata for successful responses.
+		awsmiddleware.SetRequestIDMetadata(&metadata, v)
+	}
+
+	// look up host-id
+	if v := resp.Header.Get("X-Amz-Id-2"); len(v) != 0 {
+		// set hostID on metadata for successful responses.
+		SetHostIDMetadata(&metadata, v)
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go
new file mode 100644
index 000000000..bee8da3fe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go
@@ -0,0 +1,77 @@
+package s3shared
+
+import (
+	"fmt"
+	"strings"
+
+	awsarn "github.com/aws/aws-sdk-go-v2/aws/arn"
+	"github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+)
+
+// ResourceRequest represents an ARN resource and api request metadata
+type ResourceRequest struct {
+	Resource arn.Resource
+	// RequestRegion is the region configured on the request config
+	RequestRegion string
+
+	// SigningRegion is the signing region resolved for the request
+	SigningRegion string
+
+	// PartitionID is the resolved partition id for the provided request region
+	PartitionID string
+
+	// UseARNRegion indicates if the client should use the region provided in an ARN resource
+	UseARNRegion bool
+
+	// UseFIPS indicates if the client is configured for FIPS
+	UseFIPS bool
+}
+
+// ARN returns the resource ARN
+func (r ResourceRequest) ARN() awsarn.ARN {
+	return r.Resource.GetARN()
+}
+
+// ResourceConfiguredForFIPS returns true if the resource ARN's region is FIPS
+//
+// Deprecated: FIPS will not be present in the ARN region
+func (r ResourceRequest) ResourceConfiguredForFIPS() bool {
+	return IsFIPS(r.ARN().Region)
+}
+
+// AllowCrossRegion returns a bool value to denote if the S3UseARNRegion flag is set
+func (r ResourceRequest) AllowCrossRegion() bool {
+	return r.UseARNRegion
+}
+
+// IsCrossPartition returns true if the request is configured for the region of another partition than
+// the partition that the resource ARN region resolves to. IsCrossPartition will not return an error
+// if the request is not configured with a specific partition id. This might happen if the customer provides
+// a custom endpoint url but does not associate a partition id with it.
+func (r ResourceRequest) IsCrossPartition() (bool, error) {
+	rv := r.PartitionID
+	if len(rv) == 0 {
+		return false, nil
+	}
+
+	av := r.Resource.GetARN().Partition
+	if len(av) == 0 {
+		return false, fmt.Errorf("no partition id for provided ARN")
+	}
+
+	return !strings.EqualFold(rv, av), nil
+}
+
+// IsCrossRegion returns true if the request signing region is not the same as the arn region
+func (r ResourceRequest) IsCrossRegion() bool {
+	v := r.SigningRegion
+	return !strings.EqualFold(v, r.Resource.GetARN().Region)
+}
+
+// IsFIPS returns true if the region is a fips pseudo-region
+//
+// Deprecated: FIPS should be specified via EndpointOptions.
+func IsFIPS(region string) bool {
+	return strings.HasPrefix(region, "fips-") ||
+		strings.HasSuffix(region, "-fips")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go
new file mode 100644
index 000000000..857336243
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go
@@ -0,0 +1,33 @@
+package s3shared
+
+import (
+	"errors"
+	"fmt"
+
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+)
+
+// ResponseError provides the HTTP centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
+type ResponseError struct {
+	*awshttp.ResponseError
+
+	// HostID associated with response error
+	HostID string
+}
+
+// ServiceHostID returns the host id associated with Response Error
+func (e *ResponseError) ServiceHostID() string { return e.HostID }
+
+// Error returns the formatted error
+func (e *ResponseError) Error() string {
+	return fmt.Sprintf(
+		"https response error StatusCode: %d, RequestID: %s, HostID: %s, %v",
+		e.Response.StatusCode, e.RequestID, e.HostID, e.Err)
+}
+
+// As populates target and returns true if the type of target is an error type that
+// the ResponseError embeds (e.g. S3 HTTP ResponseError)
+func (e *ResponseError) As(target interface{}) bool {
+	return errors.As(e.ResponseError, target)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go
new file mode 100644
index 000000000..543576245
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go
@@ -0,0 +1,60 @@
+package s3shared
+
+import (
+	"context"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddResponseErrorMiddleware adds response error wrapper middleware
+func AddResponseErrorMiddleware(stack *middleware.Stack) error {
+	// add error wrapper middleware before request id retriever middleware so that it can wrap the error response
+	// returned by operation deserializers
+	return stack.Deserialize.Insert(&errorWrapper{}, metadataRetrieverID, middleware.Before)
+}
+
+type errorWrapper struct {
+}
+
+// ID returns the middleware identifier
+func (m *errorWrapper) ID() string {
+	return "ResponseErrorWrapper"
+}
+
+func (m *errorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err == nil {
+		// Nothing to do when there is no error.
+		return out, metadata, err
+	}
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		// No raw response to wrap with.
+		return out, metadata, err
+	}
+
+	// look for request id in metadata
+	reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata)
+	// look for host id in metadata
+	hostID, _ := GetHostIDMetadata(metadata)
+
+	// Wrap the returned smithy error with the request id retrieved from the metadata
+	err = &ResponseError{
+		ResponseError: &awshttp.ResponseError{
+			ResponseError: &smithyhttp.ResponseError{
+				Response: resp,
+				Err:      err,
+			},
+			RequestID: reqID,
+		},
+		HostID: hostID,
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/s3100continue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/s3100continue.go
new file mode 100644
index 000000000..0f43ec0d4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/s3100continue.go
@@ -0,0 +1,54 @@
+package s3shared
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const s3100ContinueID = "S3100Continue"
+const default100ContinueThresholdBytes int64 = 1024 * 1024 * 2
+
+// Add100Continue adds a middleware, which sets the {Expect: 100-continue} header on s3 client HTTP PUT requests larger than 2MB
+// or with unknown size streaming bodies, during the operation builder step
+func Add100Continue(stack *middleware.Stack, continueHeaderThresholdBytes int64) error {
+	return stack.Build.Add(&s3100Continue{
+		continueHeaderThresholdBytes: continueHeaderThresholdBytes,
+	}, middleware.After)
+}
+
+type s3100Continue struct {
+	continueHeaderThresholdBytes int64
+}
+
+// ID returns the middleware identifier
+func (m *s3100Continue) ID() string {
+	return s3100ContinueID
+}
+
+func (m *s3100Continue) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	sizeLimit := default100ContinueThresholdBytes
+	switch {
+	case m.continueHeaderThresholdBytes == -1:
+		return next.HandleBuild(ctx, in)
+	case m.continueHeaderThresholdBytes > 0:
+		sizeLimit = m.continueHeaderThresholdBytes
+	default:
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	if req.ContentLength == -1 || (req.ContentLength == 0 && req.Body != nil) || req.ContentLength >= sizeLimit {
+		req.Header.Set("Expect", "100-continue")
+	}
+
+	return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go
new file mode 100644
index 000000000..22773199f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go
@@ -0,0 +1,78 @@
+package s3shared
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+
+	awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+)
+
+// EnableDualstack represents middleware struct for enabling dualstack support
+//
+// Deprecated: See EndpointResolverOptions' UseDualStackEndpoint support
+type EnableDualstack struct {
+	// UseDualstack indicates if dualstack endpoint resolving is to be enabled
+	UseDualstack bool
+
+	// DefaultServiceID is the service id prefix used in endpoint resolving;
+	// by default the service-id is 's3' and 's3-control' for the s3 and s3control services.
+	DefaultServiceID string
+}
+
+// ID returns the middleware ID.
+func (*EnableDualstack) ID() string {
+	return "EnableDualstack"
+}
+
+// HandleSerialize handles serializer middleware behavior when middleware is executed
+func (u *EnableDualstack) HandleSerialize(
+	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+
+	// check for host name immutable property
+	if smithyhttp.GetHostnameImmutable(ctx) {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	serviceID := awsmiddle.GetServiceID(ctx)
+
+	// s3-control may be represented as `S3 Control` in the model
+	if serviceID == "S3 Control" {
+		serviceID = "s3-control"
+	}
+
+	if len(serviceID) == 0 {
+		// default service id
+		serviceID = u.DefaultServiceID
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	if u.UseDualstack {
+		parts := strings.Split(req.URL.Host, ".")
+		if len(parts) < 3 {
+			return out, metadata, fmt.Errorf("unable to update endpoint host for dualstack, hostname invalid, %s", req.URL.Host)
+		}
+
+		for i := 0; i+1 < len(parts); i++ {
+			if strings.EqualFold(parts[i], serviceID) {
+				parts[i] = parts[i] + ".dualstack"
+				break
+			}
+		}
+
+		// construct the url host
+		req.URL.Host = strings.Join(parts, ".")
+	}
+
+	return next.HandleSerialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go
new file mode 100644
index 000000000..65fd07e00
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go
@@ -0,0 +1,89 @@
+package s3shared
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+	Code      string `xml:"Code"`
+	Message   string `xml:"Message"`
+	RequestID string `xml:"RequestId"`
+	HostID    string `xml:"HostId"`
+}
+
+// GetUnwrappedErrorResponseComponents returns the error fields from an xml error response body
+func GetUnwrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) {
+	var errComponents ErrorComponents
+	if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF {
+		return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err)
+	}
+	return errComponents, nil
+}
+
+// GetWrappedErrorResponseComponents returns the error fields from an xml error response body
+// in which the error code and message are wrapped by an Error tag
+func GetWrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) {
+	var errComponents struct {
+		Code      string `xml:"Error>Code"`
+		Message   string `xml:"Error>Message"`
+		RequestID string `xml:"RequestId"`
+		HostID    string `xml:"HostId"`
+	}
+
+	if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF {
+		return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err)
+	}
+
+	return ErrorComponents{
+		Code:      errComponents.Code,
+		Message:   errComponents.Message,
+		RequestID: errComponents.RequestID,
+		HostID:    errComponents.HostID,
+	}, nil
+}
+
+// GetErrorResponseComponents retrieves error components according to the passed in options
+func GetErrorResponseComponents(r io.Reader, options ErrorResponseDeserializerOptions) (ErrorComponents, error) {
+	var errComponents ErrorComponents
+	var err error
+
+	if options.IsWrappedWithErrorTag {
+		errComponents, err = GetWrappedErrorResponseComponents(r)
+	} else {
+		errComponents, err = GetUnwrappedErrorResponseComponents(r)
+	}
+
+	if err != nil {
+		return ErrorComponents{}, err
+	}
+
+	// If an error code or message is not retrieved, it is derived from the http status code;
+	// e.g., for the S3 service, we derive the error code and message if none is found.
+	if options.UseStatusCode && len(errComponents.Code) == 0 &&
+		len(errComponents.Message) == 0 {
+		// derive code and message from status code
+		statusText := http.StatusText(options.StatusCode)
+		errComponents.Code = strings.Replace(statusText, " ", "", -1)
+		errComponents.Message = statusText
+	}
+	return errComponents, nil
+}
+
+// ErrorResponseDeserializerOptions represents error response deserializer options for the s3 and s3-control services
+type ErrorResponseDeserializerOptions struct {
+	// UseStatusCode denotes if the status code should be used to retrieve the error code and msg
+	UseStatusCode bool
+
+	// StatusCode is the status code of the error response
+	StatusCode int
+
+	// IsWrappedWithErrorTag represents if the error response's code and msg are wrapped within an
+	// additional tag
+	IsWrappedWithErrorTag bool
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
new file mode 100644
index 000000000..ed5204688
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
@@ -0,0 +1,544 @@
+# v1.53.1 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.53.0 (2024-03-18)
+
+* **Feature**: Fix two issues with response root node names.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.52.1 (2024-03-15)
+
+* **Documentation**: Documentation updates for Amazon S3.
+
+# v1.52.0 (2024-03-13)
+
+* **Feature**: This release makes the default option for S3 on Outposts request signing to use the SigV4A algorithm when using AWS Common Runtime (CRT).
+
+# v1.51.4 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.51.3 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.51.2 (2024-03-04)
+
+* **Bug Fix**: Update internal/presigned-url dependency for corrected API name.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.51.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.51.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.50.3 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.50.2 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.50.1 (2024-02-19)
+
+* **Bug Fix**: Prevent potential panic caused by invalid comparison of credentials.
+
+# v1.50.0 (2024-02-16)
+
+* **Feature**: Add new ClientOptions field to waiter config which allows you to extend the config for operation calls made by waiters.
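The waiter `ClientOptions` entry above is easiest to see in use. Below is a minimal sketch, not part of the vendored files: it assumes an already-configured client, placeholder bucket/key names, and that the `ObjectExistsWaiterOptions.ClientOptions` field matches the shape described in the entry.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// ClientOptions extends the client config used only for the HeadObject
	// calls this waiter performs, without mutating the base client.
	waiter := s3.NewObjectExistsWaiter(client, func(o *s3.ObjectExistsWaiterOptions) {
		o.ClientOptions = append(o.ClientOptions, func(opts *s3.Options) {
			opts.RetryMaxAttempts = 5 // illustrative override only
		})
	})

	// Bucket and key names are placeholders.
	err = waiter.Wait(context.TODO(), &s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	}, 2*time.Minute)
	if err != nil {
		log.Fatal(err)
	}
}
```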
+
+# v1.49.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.48.1 (2024-01-24)
+
+* No change notes available for this release.
+
+# v1.48.0 (2024-01-05)
+
+* **Feature**: Support smithy sigv4a trait for codegen.
+
+# v1.47.8 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.47.7 (2023-12-20)
+
+* No change notes available for this release.
+
+# v1.47.6 (2023-12-18)
+
+* No change notes available for this release.
+
+# v1.47.5 (2023-12-08)
+
+* **Bug Fix**: Add non-vhostable buckets to request path when using legacy V1 endpoint resolver.
+* **Bug Fix**: Improve uniqueness of default S3Express session credentials cache keying to prevent collision in multi-credential scenarios.
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.47.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.47.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+
+# v1.47.2 (2023-12-01)
+
+* **Bug Fix**: Correct wrapping of errors in authentication workflow.
+* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.47.1 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.47.0 (2023-11-29)
+
+* **Feature**: Expose Options() accessor on service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.46.0 (2023-11-28.2)
+
+* **Feature**: Add S3Express support.
+* **Feature**: Adds support for S3 Express One Zone.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.45.1 (2023-11-28)
+
+* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction.
+
+# v1.45.0 (2023-11-27)
+
+* **Feature**: Adding new params - Key and Prefix, to S3 API operations for supporting S3 Access Grants. Note - These updates will not change any of the existing S3 API functionality.
+
+# v1.44.0 (2023-11-21)
+
+* **Feature**: Add support for automatic date based partitioning in S3 Server Access Logs.
+* **Bug Fix**: Don't send MaxKeys/MaxUploads=0 when unspecified in ListObjectVersions and ListMultipartUploads paginators.
+
+# v1.43.1 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.43.0 (2023-11-17)
+
+* **Feature**: **BREAKING CHANGE** Correct nullability of a large number of S3 structure fields. See https://github.com/aws/aws-sdk-go-v2/issues/2162.
+* **Feature**: Removes all default 0 values for numbers and false values for booleans
+
+# v1.42.2 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.42.1 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.42.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.41.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.40.2 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.40.1 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.40.0 (2023-09-26)
+
+* **Feature**: This release adds a new field COMPLETED to the ReplicationStatus Enum. You can now use this field to validate the replication status of S3 objects using the AWS SDK.
+
+# v1.39.0 (2023-09-20)
+
+* **Feature**: Fix an issue where the SDK can fail to unmarshall response due to NumberFormatException
+
+# v1.38.5 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.4 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.3 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.2 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.1 (2023-08-01)
+
+* No change notes available for this release.
+
+# v1.38.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.37.1 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.37.0 (2023-07-13)
+
+* **Feature**: S3 Inventory now supports Object Access Control List and Object Owner as available object metadata fields in inventory reports.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.36.0 (2023-06-28)
+
+* **Feature**: The S3 ListObjects, ListObjectsV2 and ListObjectVersions API now supports a new optional header x-amz-optional-object-attributes. If the header contains RestoreStatus as the value, then S3 will include Glacier restore status, i.e. isRestoreInProgress and RestoreExpiryDate, in the List response.
+
+# v1.35.0 (2023-06-16)
+
+* **Feature**: This release adds SDK support for the request-payer request header and request-charged response header in the "GetBucketAccelerateConfiguration", "ListMultipartUploads", "ListObjects", "ListObjectsV2" and "ListObjectVersions" S3 APIs.
+
+# v1.34.1 (2023-06-15)
+
+* No change notes available for this release.
+
+# v1.34.0 (2023-06-13)
+
+* **Feature**: Integrate double encryption feature to SDKs.
+* **Bug Fix**: Fix HeadObject to return types.NotFound when an object does not exist. Fixes [2084](https://github.com/aws/aws-sdk-go-v2/issues/2084)
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.1 (2023-05-04)
+
+* **Documentation**: Documentation updates for Amazon S3
+
+# v1.33.0 (2023-04-24)
+
+* **Feature**: added custom paginators for ListMultipartUploads and ListObjectVersions
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.0 (2023-04-19)
+
+* **Feature**: Provides support for "Snow" Storage class.
+
+# v1.31.3 (2023-04-10)
+
+* No change notes available for this release.
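The custom paginators mentioned in the v1.33.0 entry above follow the same `HasMorePages`/`NextPage` shape as the SDK's generated paginators. Below is a minimal sketch, not part of the vendored files, assuming a configured client and a placeholder bucket name.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Bucket name is a placeholder.
	p := s3.NewListObjectVersionsPaginator(client, &s3.ListObjectVersionsInput{
		Bucket: aws.String("example-bucket"),
	})

	// Drain all pages; the paginator carries the continuation markers forward internally.
	for p.HasMorePages() {
		page, err := p.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, v := range page.Versions {
			fmt.Println(aws.ToString(v.Key), aws.ToString(v.VersionId))
		}
	}
}
```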
+ +# v1.31.2 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.1 (2023-03-31) + +* **Documentation**: Documentation updates for Amazon S3 + +# v1.31.0 (2023-03-21) + +* **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.6 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.5 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.30.4 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.3 (2023-02-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.2 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.1 (2023-01-23) + +* No change notes available for this release. + +# v1.30.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.29.6 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.5 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.4 (2022-11-22) + +* No change notes available for this release. + +# v1.29.3 (2022-11-16) + +* No change notes available for this release. + +# v1.29.2 (2022-11-10) + +* No change notes available for this release. + +# v1.29.1 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.0 (2022-10-21) + +* **Feature**: S3 on Outposts launches support for automatic bucket-style alias. You can use the automatic access point alias instead of an access point ARN for any object-level operation in an Outposts bucket. +* **Bug Fix**: The SDK client has been updated to utilize the `aws.IsCredentialsProvider` function for determining if `aws.AnonymousCredentials` has been configured for the `CredentialProvider`. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.0 (2022-10-19) + +* **Feature**: Updates internal logic for constructing API endpoints. We have added rule-based endpoints and internal model parameters. + +# v1.27.11 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.10 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.9 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.8 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.7 (2022-08-30) + +* No change notes available for this release. + +# v1.27.6 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.5 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.4 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.3 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.2 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.1 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.0 (2022-07-01) + +* **Feature**: Add presign support for HeadBucket, DeleteObject, and DeleteBucket. Fixes [#1076](https://github.com/aws/aws-sdk-go-v2/issues/1076). 
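The presign support added in the v1.27.0 entry directly above hangs off `s3.NewPresignClient`. Below is a minimal sketch for `DeleteObject`, not part of the vendored files; the bucket/key names are placeholders and the 10-minute expiry is an assumed example value.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	presigner := s3.NewPresignClient(s3.NewFromConfig(cfg))

	// Bucket and key names are placeholders.
	req, err := presigner.PresignDeleteObject(context.TODO(), &s3.DeleteObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	}, s3.WithPresignExpires(10*time.Minute))
	if err != nil {
		log.Fatal(err)
	}

	// The returned URL can be handed to any HTTP client for a signed DELETE.
	fmt.Println(req.Method, req.URL)
}
```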
+
+# v1.26.12 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.11 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.10 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.9 (2022-05-06)
+
+* No change notes available for this release.
+
+# v1.26.8 (2022-05-03)
+
+* **Documentation**: Documentation only update for doc bug fixes for the S3 API docs.
+
+# v1.26.7 (2022-04-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.6 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.5 (2022-04-12)
+
+* **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
+
+# v1.26.4 (2022-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Bug Fix**: Fixes the AWS Sigv4 signer to trim header value's whitespace when computing the canonical headers block of the string to sign.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.1 (2022-01-28)
+
+* **Bug Fix**: Updates SDK API client deserialization to pre-allocate byte slice and string response payloads, [#1565](https://github.com/aws/aws-sdk-go-v2/pull/1565). Thanks to [Tyson Mote](https://github.com/tysonmote) for submitting this PR.
+
+# v1.24.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Documentation**: API client updated
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.0 (2021-12-21)
+
+* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens.
+* **Feature**: Updated to latest service endpoints
+
+# v1.21.0 (2021-12-02)
+
+* **Feature**: API client updated
+* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2021-11-30)
+
+* **Feature**: API client updated
+
+# v1.19.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.0 (2021-11-12)
+
+* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature.
+
+# v1.18.0 (2021-11-06)
+
+* **Feature**: Support has been added for the SelectObjectContent API.
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Feature**: Updated service to latest API model.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Feature**: Updates S3 streaming operations - PutObject, UploadPart, WriteGetObjectResponse to use unsigned payload signing auth when TLS is enabled.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.1 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2021-09-17)
+
+* **Feature**: Updated API client and endpoints to latest revision.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2021-09-10)
+
+* No change notes available for this release.
+
+# v1.15.0 (2021-09-02)
+
+* **Feature**: API client updated
+* **Feature**: Add support for S3 Multi-Region Access Point ARNs.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2021-08-27)
+
+* **Feature**: Updated API model to latest revision.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2021-08-19)
+
+* **Feature**: API client updated
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2021-08-04)
+
+* **Feature**: Add `HeadObject` presign support. ([#1346](https://github.com/aws/aws-sdk-go-v2/pull/1346))
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2021-06-04)
+
+* **Feature**: The handling of AccessPoint and Outpost ARNs has been updated.
+* **Feature**: Updated service client to latest API model.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2021-05-25)
+
+* **Feature**: API client updated
+
+# v1.8.0 (2021-05-20)
+
+* **Feature**: API client updated
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Feature**: Updated to latest service API model.
+* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go new file mode 100644 index 000000000..a31c2e0ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go @@ -0,0 +1,898 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/v4a" + acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" + "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" + s3sharedconfig "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "S3" +const ServiceAPIVersion = "2006-03-01" + +// Client provides the API client to make operations call for Amazon Simple +// Storage Service. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. 
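+//
+// A minimal, hedged usage sketch (illustrative only, not part of the
+// generated API documentation): the region string and credentials provider
+// below are assumptions.
+//
+//	client := s3.New(s3.Options{
+//		Region:      "us-east-1",         // assumed region
+//		Credentials: credentialsProvider, // assumed aws.CredentialsProvider
+//	})
+//
+// Most applications derive these options from a loaded aws.Config via
+// NewFromConfig instead (defined further below).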
+func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveEndpointResolverV2(&options) + + resolveHTTPSignerV4a(&options) + + resolveAuthSchemeResolver(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttempts(&options) + + ignoreAnonymousAuth(&options) + + resolveExpressCredentials(&options) + + finalizeServiceEndpointAuthResolver(&options) + + resolveAuthSchemes(&options) + + client := &Client{ + options: options, + } + + finalizeExpressCredentials(&options, client) + + return client +} + +// Options returns a copy of the client configuration. +// +// Callers SHOULD NOT perform mutations on any inner structures within client +// config. Config overrides should instead be made on a per-operation basis through +// functional options. +func (c *Client) Options() Options { + return c.options.Copy() +} + +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + + for _, fn := range optFns { + fn(&options) + } + + setSafeEventStreamClientLogMode(&options, opID) + + finalizeOperationRetryMaxAttempts(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + finalizeOperationExpressCredentials(&options, *c) + + finalizeOperationEndpointAuthResolver(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type operationInputKey struct{} + +func setOperationInput(ctx context.Context, input interface{}) context.Context { + return middleware.WithStackValue(ctx, operationInputKey{}, input) +} + +func getOperationInput(ctx context.Context) interface{} { + return middleware.GetStackValue(ctx, operationInputKey{}) +} + +type setOperationInputMiddleware struct { +} + +func (*setOperationInputMiddleware) ID() string { + return "setOperationInput" +} + +func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + ctx = setOperationInput(ctx, in.Parameters) + return next.HandleSerialize(ctx, in) +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %w", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %v", err) + } + if err := 
stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %v", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %w", err) + } + return nil +} +func resolveAuthSchemeResolver(options *Options) { + if options.AuthSchemeResolver == nil { + options.AuthSchemeResolver = &defaultAuthSchemeResolver{} + } +} + +func resolveAuthSchemes(options *Options) { + if options.AuthSchemes == nil { + options.AuthSchemes = []smithyhttp.AuthScheme{ + internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + internalauth.NewHTTPAuthScheme("com.amazonaws.s3#sigv4express", &s3cust.ExpressSigner{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + internalauth.NewHTTPAuthScheme("aws.auth#sigv4a", &v4a.SignerAdapter{ + Signer: options.httpSignerV4a, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + } + } +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseARNRegion(cfg, &opts) + resolveDisableMultiRegionAccessPoints(cfg, &opts) + resolveDisableExpressAuth(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttempts(o *Options) { + if o.RetryMaxAttempts == 0 { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func finalizeOperationRetryMaxAttempts(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "s3", goModuleVersion) + if len(options.AppID) > 0 { + ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) + } + + return nil +} + +func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { + id := (*awsmiddleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = awsmiddleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, middleware.After); err != nil { + return nil, err + } + } + + ua, ok := 
mw.(*awsmiddleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + so.DisableURIPathEscaping = true + }) +} + +func addClientRequestID(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) +} + +func addComputeContentLength(stack *middleware.Stack) error { + return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) +} + +func addRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) +} + +func addRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) +} +func addStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) +} + +func addUnsignedPayload(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) +} + +func addComputePayloadSHA256(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) +} + +func addContentSHA256Header(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) +} + +func addRetry(stack *middleware.Stack, o Options) error { + attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { + m.LogAttempts = o.ClientLogMode.IsRetries() + }) + if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { + return err + } + if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { + return err + } + return nil +} + +// resolves UseARNRegion S3 configuration +func resolveUseARNRegion(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := s3sharedconfig.ResolveUseARNRegion(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.UseARNRegion = value + } + return nil +} + +// resolves DisableMultiRegionAccessPoints S3 configuration +func resolveDisableMultiRegionAccessPoints(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := s3sharedconfig.ResolveDisableMultiRegionAccessPoints(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.DisableMultiRegionAccessPoints = value + } + return nil +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + 
o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +type httpSignerV4a interface { + SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash, + service string, regionSet []string, signingTime time.Time, + optFns ...func(*v4a.SignerOptions)) error +} + +func resolveHTTPSignerV4a(o *Options) { + if o.httpSignerV4a != nil { + return + } + o.httpSignerV4a = newDefaultV4aSigner(*o) +} + +func newDefaultV4aSigner(o Options) *v4a.Signer { + return v4a.NewSigner(func(so *v4a.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addMetadataRetrieverMiddleware(stack *middleware.Stack) error { + return s3shared.AddMetadataRetrieverMiddleware(stack) +} + +func add100Continue(stack *middleware.Stack, options Options) error { + return s3shared.Add100Continue(stack, options.ContinueHeaderThresholdBytes) +} + +func addRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) +} + +// ComputedInputChecksumsMetadata provides information about the algorithms used +// to compute the checksum(s) of the input payload. +type ComputedInputChecksumsMetadata struct { + // ComputedChecksums is a map of algorithm name to checksum value of the computed + // input payload's checksums. + ComputedChecksums map[string]string +} + +// GetComputedInputChecksumsMetadata retrieves from the result metadata the map of +// algorithms and input payload checksums values. +func GetComputedInputChecksumsMetadata(m middleware.Metadata) (ComputedInputChecksumsMetadata, bool) { + values, ok := internalChecksum.GetComputedInputChecksums(m) + if !ok { + return ComputedInputChecksumsMetadata{}, false + } + return ComputedInputChecksumsMetadata{ + ComputedChecksums: values, + }, true + +} + +// ChecksumValidationMetadata contains metadata such as the checksum algorithm +// used for data integrity validation. +type ChecksumValidationMetadata struct { + // AlgorithmsUsed is the set of the checksum algorithms used to validate the + // response payload. The response payload must be completely read in order for the + // checksum validation to be performed. An error is returned by the operation + // output's response io.ReadCloser if the computed checksums are invalid. + AlgorithmsUsed []string +} + +// GetChecksumValidationMetadata returns the set of algorithms that will be used +// to validate the response payload with. The response payload must be completely +// read in order for the checksum validation to be performed. An error is returned +// by the operation output's response io.ReadCloser if the computed checksums are +// invalid. Returns false if no checksum algorithm used metadata was found. 
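+//
+// A hedged usage sketch (assumes "out" is the output of a prior operation,
+// e.g. GetObject, whose response body has been fully read):
+//
+//	if meta, ok := s3.GetChecksumValidationMetadata(out.ResultMetadata); ok {
+//		fmt.Println("checksums validated with:", meta.AlgorithmsUsed)
+//	}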
+func GetChecksumValidationMetadata(m middleware.Metadata) (ChecksumValidationMetadata, bool) { + values, ok := internalChecksum.GetOutputValidationAlgorithmsUsed(m) + if !ok { + return ChecksumValidationMetadata{}, false + } + return ChecksumValidationMetadata{ + AlgorithmsUsed: append(make([]string, 0, len(values)), values...), + }, true + +} + +// nopGetBucketAccessor is no-op accessor for operation that don't support bucket +// member as input +func nopGetBucketAccessor(input interface{}) (*string, bool) { + return nil, false +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return s3shared.AddResponseErrorMiddleware(stack) +} + +func disableAcceptEncodingGzip(stack *middleware.Stack) error { + return acceptencodingcust.AddAcceptEncodingGzip(stack, acceptencodingcust.AddAcceptEncodingGzipOptions{}) +} + +// ResponseError provides the HTTP centric error type wrapping the underlying +// error with the HTTP response value and the deserialized RequestID. +type ResponseError interface { + error + + ServiceHostID() string + ServiceRequestID() string +} + +var _ ResponseError = (*s3shared.ResponseError)(nil) + +// GetHostIDMetadata retrieves the host id from middleware metadata returns host +// id as string along with a boolean indicating presence of hostId on middleware +// metadata. +func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) { + return s3shared.GetHostIDMetadata(metadata) +} + +// HTTPPresignerV4 represents presigner interface used by presign url client +type HTTPPresignerV4 interface { + PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*v4.SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// httpPresignerV4a represents sigv4a presigner interface used by presign url +// client +type httpPresignerV4a interface { + PresignHTTP( + ctx context.Context, credentials v4a.Credentials, r *http.Request, + payloadHash string, service string, regionSet []string, signingTime time.Time, + optFns ...func(*v4a.SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// PresignOptions represents the presign client options +type PresignOptions struct { + + // ClientOptions are list of functional options to mutate client options used by + // the presign client. + ClientOptions []func(*Options) + + // Presigner is the presigner used by the presign url client + Presigner HTTPPresignerV4 + + // Expires sets the expiration duration for the generated presign url. This should + // be the duration in seconds the presigned URL should be considered valid for. If + // not set or set to zero, presign url would default to expire after 900 seconds. 
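+	//
+	// A hedged sketch of setting this via the functional option defined below
+	// (the 15-minute duration is an assumption):
+	//
+	//	presigner := s3.NewPresignClient(client, s3.WithPresignExpires(15*time.Minute))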
+ Expires time.Duration + + // presignerV4a is the presigner used by the presign url client + presignerV4a httpPresignerV4a +} + +func (o PresignOptions) copy() PresignOptions { + clientOptions := make([]func(*Options), len(o.ClientOptions)) + copy(clientOptions, o.ClientOptions) + o.ClientOptions = clientOptions + return o +} + +// WithPresignClientFromClientOptions is a helper utility to retrieve a function +// that takes PresignOption as input +func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) { + return withPresignClientFromClientOptions(optFns).options +} + +type withPresignClientFromClientOptions []func(*Options) + +func (w withPresignClientFromClientOptions) options(o *PresignOptions) { + o.ClientOptions = append(o.ClientOptions, w...) +} + +// WithPresignExpires is a helper utility to append Expires value on presign +// options optional function +func WithPresignExpires(dur time.Duration) func(*PresignOptions) { + return withPresignExpires(dur).options +} + +type withPresignExpires time.Duration + +func (w withPresignExpires) options(o *PresignOptions) { + o.Expires = time.Duration(w) +} + +// PresignClient represents the presign url client +type PresignClient struct { + client *Client + options PresignOptions +} + +// NewPresignClient generates a presign client using provided API Client and +// presign options +func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient { + var options PresignOptions + for _, fn := range optFns { + fn(&options) + } + if len(options.ClientOptions) != 0 { + c = New(c.options, options.ClientOptions...) + } + + if options.Presigner == nil { + options.Presigner = newDefaultV4Signer(c.options) + } + + if options.presignerV4a == nil { + options.presignerV4a = newDefaultV4aSigner(c.options) + } + + return &PresignClient{ + client: c, + options: options, + } +} + +func withNopHTTPClientAPIOption(o *Options) { + o.HTTPClient = smithyhttp.NopClient{} +} + +type presignContextPolyfillMiddleware struct { +} + +func (*presignContextPolyfillMiddleware) ID() string { + return "presignContextPolyfill" +} + +func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + schemeID := rscheme.Scheme.SchemeID() + ctx = s3cust.SetSignerVersion(ctx, schemeID) + if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" { + if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningName(ctx, sn) + } + if sr, ok := smithyhttp.GetSigV4SigningRegion(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningRegion(ctx, sr) + } + } else if schemeID == "aws.auth#sigv4a" { + if sn, ok := smithyhttp.GetSigV4ASigningName(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningName(ctx, sn) + } + if sr, ok := smithyhttp.GetSigV4ASigningRegions(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningRegion(ctx, sr[0]) + } + } + + return next.HandleFinalize(ctx, in) +} + +type presignConverter PresignOptions + +func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { + if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok { + 
stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID()) + } + if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok { + stack.Finalize.Remove((*retry.Attempt)(nil).ID()) + } + if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok { + stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID()) + } + stack.Deserialize.Clear() + stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) + stack.Build.Remove("UserAgent") + if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil { + return err + } + + pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + Presigner: c.Presigner, + LogSigning: options.ClientLogMode.IsSigning(), + }) + if _, err := stack.Finalize.Swap("Signing", pmw); err != nil { + return err + } + if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { + return err + } + + // extended s3 presigning + signermv := s3cust.NewPresignHTTPRequestMiddleware(s3cust.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + ExpressCredentials: options.ExpressCredentials, + V4Presigner: c.Presigner, + V4aPresigner: c.presignerV4a, + LogSigning: options.ClientLogMode.IsSigning(), + }) + err = s3cust.RegisterPreSigningMiddleware(stack, signermv) + if err != nil { + return err + } + + if c.Expires < 0 { + return fmt.Errorf("presign URL duration must be 0 or greater, %v", c.Expires) + } + // add middleware to set expiration for s3 presigned url, if expiration is set to + // 0, this middleware sets a default expiration of 900 seconds + err = stack.Build.Add(&s3cust.AddExpiresOnPresignedURL{Expires: c.Expires}, middleware.After) + if err != nil { + return err + } + err = presignedurlcust.AddAsIsPresigningMiddleware(stack) + if err != nil { + return err + } + return nil +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type disableHTTPSMiddleware struct { + DisableHTTPS bool +} + +func (*disableHTTPSMiddleware) ID() string { + return "disableHTTPS" +} + +func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleFinalize(ctx, in) +} + +func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Finalize.Insert(&disableHTTPSMiddleware{ + DisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "ResolveEndpointV2", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go new file mode 100644 index 000000000..c71060e08 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go @@ -0,0 +1,280 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads are +// currently in progress, those part uploads might or might not succeed. As a +// result, it might be necessary to abort a given multipart upload multiple times +// in order to completely free all storage consumed by all parts. To verify that +// all parts have been removed and prevent getting charged for the part storage, +// you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// API operation and ensure that the parts list is empty. Directory buckets - For +// directory buckets, you must make requests for this API operation to the Zonal +// endpoint. These endpoints support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are +// related to AbortMultipartUpload : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) { + if params == nil { + params = &AbortMultipartUploadInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AbortMultipartUpload", params, optFns, c.addOperationAbortMultipartUploadMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AbortMultipartUploadOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AbortMultipartUploadInput struct { + + // The bucket name to which the upload was taking place. Directory buckets - When + // you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Key of the object for which the multipart upload was initiated. + // + // This member is required. + Key *string + + // Upload ID that identifies the multipart upload. + // + // This member is required. 
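+	//
+	// A hedged call sketch (the bucket, key, and uploadID values are
+	// assumptions; aws is github.com/aws/aws-sdk-go-v2/aws):
+	//
+	//	_, err := client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
+	//		Bucket:   aws.String("example-bucket"),
+	//		Key:      aws.String("example-key"),
+	//		UploadId: aws.String(uploadID),
+	//	})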
+ UploadId *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + noSmithyDocumentSerde +} + +func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + +type AbortMultipartUploadOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpAbortMultipartUpload{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpAbortMultipartUpload{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "AbortMultipartUpload"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAbortMultipartUpload(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = 
addRecursionDetection(stack); err != nil { + return err + } + if err = addAbortMultipartUploadUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *AbortMultipartUploadInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opAbortMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "AbortMultipartUpload", + } +} + +// getAbortMultipartUploadBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getAbortMultipartUploadBucketMember(input interface{}) (*string, bool) { + in := input.(*AbortMultipartUploadInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addAbortMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getAbortMultipartUploadBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go new file mode 100644 index 000000000..8f89d780e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go @@ -0,0 +1,460 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Completes a multipart upload by assembling previously uploaded parts. You first +// initiate the multipart upload and then upload all parts using the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// operation. After successfully uploading all relevant parts of an upload, you +// call this CompleteMultipartUpload operation to complete the upload. Upon +// receiving this request, Amazon S3 concatenates all the parts in ascending order +// by part number to create a new object. 
In the CompleteMultipartUpload request, +// you must provide the parts list and ensure that the parts list is complete. The +// CompleteMultipartUpload API operation concatenates the parts that you provide in +// the list. For each part in the list, you must provide the PartNumber value and +// the ETag value that are returned after that part was uploaded. The processing +// of a CompleteMultipartUpload request could take several minutes to finalize. +// After Amazon S3 begins processing the request, it sends an HTTP response header +// that specifies a 200 OK response. While processing is in progress, Amazon S3 +// periodically sends white space characters to keep the connection from timing +// out. A request could fail after the initial 200 OK response has been sent. This +// means that a 200 OK response can contain either a success or an error. The +// error response might be embedded in the 200 OK response. If you call this API +// operation directly, make sure to design your application to parse the contents +// of the response and handle it appropriately. If you use Amazon Web Services +// SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply +// error handling per your configuration settings (including automatically retrying +// the request as appropriate). If the condition persists, the SDKs throw an +// exception (or, for the SDKs that don't use exceptions, they return an error). +// Note that if CompleteMultipartUpload fails, applications should be prepared to +// retry any failed requests (including 500 error responses). For more information, +// see Amazon S3 Error Best Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html) +// . You can't use Content-Type: application/x-www-form-urlencoded for the +// CompleteMultipartUpload requests. Also, if you don't provide a Content-Type +// header, CompleteMultipartUpload can still return a 200 OK response. For more +// information about multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must +// make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. 
After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Special errors +// - Error Code: EntityTooSmall +// - Description: Your proposed upload is smaller than the minimum allowed +// object size. Each part must be at least 5 MB in size, except the last part. +// - HTTP Status Code: 400 Bad Request +// - Error Code: InvalidPart +// - Description: One or more of the specified parts could not be found. The +// part might not have been uploaded, or the specified ETag might not have matched +// the uploaded part's ETag. +// - HTTP Status Code: 400 Bad Request +// - Error Code: InvalidPartOrder +// - Description: The list of parts was not in ascending order. The parts list +// must be specified in order by part number. +// - HTTP Status Code: 400 Bad Request +// - Error Code: NoSuchUpload +// - Description: The specified multipart upload does not exist. The upload ID +// might be invalid, or the multipart upload might have been aborted or completed. +// - HTTP Status Code: 404 Not Found +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to CompleteMultipartUpload : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) { + if params == nil { + params = &CompleteMultipartUploadInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CompleteMultipartUpload", params, optFns, c.addOperationCompleteMultipartUploadMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CompleteMultipartUploadOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CompleteMultipartUploadInput struct { + + // Name of the bucket to which the multipart upload was initiated. Directory + // buckets - When you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. 
When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the multipart upload was initiated. + // + // This member is required. + Key *string + + // ID for the initiated multipart upload. + // + // This member is required. + UploadId *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // The container for the multipart upload request information. 
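+	//
+	// A hedged sketch of assembling this member (etag1/etag2 are assumed
+	// strings returned by earlier UploadPart calls; types is
+	// github.com/aws/aws-sdk-go-v2/service/s3/types):
+	//
+	//	&types.CompletedMultipartUpload{
+	//		Parts: []types.CompletedPart{
+	//			{ETag: aws.String(etag1), PartNumber: aws.Int32(1)},
+	//			{ETag: aws.String(etag2), PartNumber: aws.Int32(2)},
+	//		},
+	//	}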
+ MultipartUpload *types.CompletedMultipartUpload + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is required only when the object was created using a checksum + // algorithm or if your bucket policy requires the use of SSE-C. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + SSECustomerAlgorithm *string + + // The server-side encryption (SSE) customer managed key. This parameter is needed + // only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + SSECustomerKey *string + + // The MD5 server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + SSECustomerKeyMD5 *string + + noSmithyDocumentSerde +} + +func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + +type CompleteMultipartUploadOutput struct { + + // The name of the bucket that contains the newly created object. Does not return + // the access point ARN or access point alias if used. Access points are not + // supported by directory buckets. + Bucket *string + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. + BucketKeyEnabled *bool + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. 
This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag that identifies the newly created object's data. Objects with + // different object data will have different entity tags. The entity tag is an + // opaque string. The entity tag may or may not be an MD5 digest of the object + // data. If the entity tag is not an MD5 digest of the object data, it will contain + // one or more nonhexadecimal characters and/or will consist of less than 32 or + // more than 32 hexadecimal digits. For more information about how the entity tag + // is calculated, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ETag *string + + // If the object expiration is configured, this will contain the expiration date ( + // expiry-date ) and rule ID ( rule-id ). The value of rule-id is URL-encoded. + // This functionality is not supported for directory buckets. + Expiration *string + + // The object key of the newly created object. + Key *string + + // The URI that identifies the newly created object. + Location *string + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256 , aws:kms ). 
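// Illustrative sketch (editor's addition, not part of the generated file): the
// "calculation based on the checksum values of each individual part" described
// above can be pictured as a checksum of checksums. Assumes imports
// "encoding/base64", "fmt", and "hash/crc32"; partDigests holds the raw 4-byte
// CRC32 digest of each part in part-number order, and the reported
// base64(digest)-partCount format is an assumption based on the linked guide.
func exampleCompositeCRC32(partDigests [][]byte) string {
	h := crc32.NewIEEE()
	for _, d := range partDigests {
		h.Write(d) // checksum the concatenation of the part-level digests
	}
	return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(h.Sum(nil)), len(partDigests))
}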
For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + ServerSideEncryption types.ServerSideEncryption + + // Version ID of the newly created object, in case the bucket has versioning + // turned on. This functionality is not supported for directory buckets. + VersionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpCompleteMultipartUpload{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpCompleteMultipartUpload{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CompleteMultipartUpload"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCompleteMultipartUpload(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addCompleteMultipartUploadUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *CompleteMultipartUploadInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func 
newServiceMetadataMiddleware_opCompleteMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CompleteMultipartUpload", + } +} + +// getCompleteMultipartUploadBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getCompleteMultipartUploadBucketMember(input interface{}) (*string, bool) { + in := input.(*CompleteMultipartUploadInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addCompleteMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getCompleteMultipartUploadBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go new file mode 100644 index 000000000..c7990bab1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go @@ -0,0 +1,779 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Creates a copy of an object that is already stored in Amazon S3. You can store +// individual objects of up to 5 TB in Amazon S3. You create a copy of your object +// up to 5 GB in size in a single atomic action using this API. However, to copy an +// object greater than 5 GB, you must use the multipart upload Upload Part - Copy +// (UploadPartCopy) API. For more information, see Copy Object Using the REST +// Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html) +// . You can copy individual objects between general purpose buckets, between +// directory buckets, and between general purpose buckets and directory buckets. +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Both the Region that you want to copy the object +// from and the Region that you want to copy the object to must be enabled for your +// account.
For more information about how to enable a Region for your account, see +// Enable or disable a Region for standalone accounts (https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone) +// in the Amazon Web Services Account Management Guide. Amazon S3 transfer +// acceleration does not support cross-Region copies. If you request a cross-Region +// copy using a transfer acceleration endpoint, you get a 400 Bad Request error. +// For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// . Authentication and authorization All CopyObject requests must be +// authenticated and signed by using IAM credentials (access key ID and secret +// access key for the IAM identities). All headers with the x-amz- prefix, +// including x-amz-copy-source , must be signed. For more information, see REST +// Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) +// . Directory buckets - You must use the IAM credentials to authenticate and +// authorize your access to the CopyObject API operation, instead of using the +// temporary security credentials through the CreateSession API operation. Amazon +// Web Services CLI or SDKs handles authentication and authorization on your +// behalf. Permissions You must have read access to the source object and write +// access to the destination bucket. +// - General purpose bucket permissions - You must have permissions in an IAM +// policy based on the source and destination bucket types in a CopyObject +// operation. +// - If the source object is in a general purpose bucket, you must have +// s3:GetObject permission to read the source object that is being copied. +// - If the destination bucket is a general purpose bucket, you must have +// s3:PutObject permission to write the object copy to the destination bucket. +// - Directory bucket permissions - You must have permissions in a bucket policy +// or an IAM identity-based policy based on the source and destination bucket types +// in a CopyObject operation. +// - If the source object that you want to copy is in a directory bucket, you +// must have the s3express:CreateSession permission in the Action element of a +// policy to read the object. By default, the session is in the ReadWrite mode. +// If you want to restrict the access, you can explicitly set the +// s3express:SessionMode condition key to ReadOnly on the copy source bucket. +// - If the copy destination is a directory bucket, you must have the +// s3express:CreateSession permission in the Action element of a policy to write +// the object to the destination. The s3express:SessionMode condition key can't +// be set to ReadOnly on the copy destination bucket. For example policies, see +// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// Response and special errors When the request is an HTTP 1.1 request, the +// response is chunk encoded. When the request is not an HTTP 1.1 request, the +// response would not contain the Content-Length . You always need to read the +// entire response body to check if the copy succeeds. 
Amazon S3 might send whitespace characters +// to keep the connection alive while it copies the data. +// - If the copy is successful, you receive a response with information about +// the copied object. +// - A copy request might return an error when Amazon S3 receives the copy +// request or while Amazon S3 is copying the files. A 200 OK response can contain +// either a success or an error. +// - If the error occurs before the copy action starts, you receive a standard +// Amazon S3 error. +// - If the error occurs during the copy operation, the error response is +// embedded in the 200 OK response. For example, in a cross-region copy, you may +// encounter throttling and receive a 200 OK response. For more information, see +// Resolve the Error 200 response when copying objects to Amazon S3 (https://repost.aws/knowledge-center/s3-resolve-200-internalerror) +// . The 200 OK status code means the copy was accepted, but it doesn't mean the +// copy is complete. Another example is when you disconnect from Amazon S3 before +// the copy is complete, Amazon S3 might cancel the copy and you may receive a +// 200 OK response. You must stay connected to Amazon S3 until the entire +// response is successfully received and processed. If you call this API operation +// directly, make sure to design your application to parse the content of the +// response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs +// handle this condition. The SDKs detect the embedded error and apply error +// handling per your configuration settings (including automatically retrying the +// request as appropriate). If the condition persists, the SDKs throw an exception +// (or, for the SDKs that don't use exceptions, they return an error). +// +// Charge The copy request charge is based on the storage class and Region that +// you specify for the destination object. The request can also result in a data +// retrieval charge for the source if the source storage class bills for data +// retrieval. If the copy source is in a different region, the data transfer is +// billed to the copy source account. For pricing information, see Amazon S3 +// pricing (http://aws.amazon.com/s3/pricing/) . HTTP Host header syntax Directory +// buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to CopyObject : +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) { + if params == nil { + params = &CopyObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CopyObject", params, optFns, c.addOperationCopyObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CopyObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CopyObjectInput struct { + + // The name of the destination bucket. Directory buckets - When you use this + // operation with a directory bucket, you must use virtual-hosted-style requests in + // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style + // requests are not supported. Directory bucket names must be unique in the chosen + // Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ).
For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Specifies the source object for the copy operation. The source object can be up + // to 5 GB. If the source object is an object that was uploaded by using a + // multipart upload, the object copy will be a single part object after the source + // object is copied to the destination bucket. You specify the value of the copy + // source in one of two formats, depending on whether you want to access the source + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html) + // : + // - For objects not accessed through an access point, specify the name of the + // source bucket and the key of the source object, separated by a slash (/). For + // example, to copy the object reports/january.pdf from the general purpose + // bucket awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value + // must be URL-encoded. To copy the object reports/january.pdf from the directory + // bucket awsexamplebucket--use1-az5--x-s3 , use + // awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be + // URL-encoded. + // - For objects accessed through access points, specify the Amazon Resource + // Name (ARN) of the object as accessed through the access point, in the format + // arn:aws:s3:::accesspoint//object/ . For example, to copy the object + // reports/january.pdf through access point my-access-point owned by account + // 123456789012 in Region us-west-2 , use the URL encoding of + // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf + // . The value must be URL encoded. + // - Amazon S3 supports copy operations using Access points only when the source + // and destination buckets are in the same Amazon Web Services Region. + // - Access points are not supported by directory buckets. 
Alternatively, for + // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as + // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example, + // to copy the object reports/january.pdf through outpost my-outpost owned by + // account 123456789012 in Region us-west-2 , use the URL encoding of + // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf + // . The value must be URL-encoded. + // If your source bucket versioning is enabled, the x-amz-copy-source header by + // default identifies the current version of an object to copy. If the current + // version is a delete marker, Amazon S3 behaves as if the object was deleted. To + // copy a different version, use the versionId query parameter. Specifically, + // append ?versionId= to the value (for example, + // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 + // ). If you don't specify a version ID, Amazon S3 copies the latest version of the + // source object. If you enable versioning on the destination bucket, Amazon S3 + // generates a unique version ID for the copied object. This version ID is + // different from the version ID of the source object. Amazon S3 returns the + // version ID of the copied object in the x-amz-version-id response header in the + // response. If you do not enable versioning or suspend it on the destination + // bucket, the version ID that Amazon S3 generates in the x-amz-version-id + // response header is always null. Directory buckets - S3 Versioning isn't enabled + // and supported for directory buckets. + // + // This member is required. + CopySource *string + + // The key of the destination object. + // + // This member is required. + Key *string + + // The canned access control list (ACL) to apply to the object. When you copy an + // object, the ACL metadata is not preserved and is set to private by default. + // Only the owner has full access control. To override the default ACL setting, + // specify a new ACL when you generate a copy request. For more information, see + // Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) + // . If the destination bucket that you're copying objects to uses the bucket owner + // enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect + // permissions. Buckets that use this setting only accept PUT requests that don't + // specify an ACL or PUT requests that specify bucket owner full control ACLs, + // such as the bucket-owner-full-control canned ACL or an equivalent form of this + // ACL expressed in the XML format. For more information, see Controlling + // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // - If your destination bucket uses the bucket owner enforced setting for + // Object Ownership, all objects written to the bucket by any account will be owned + // by the bucket owner. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. + ACL types.ObjectCannedACL + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. 
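// Illustrative sketch (editor's addition, not part of the generated file): one
// way to build the URL-encoded CopySource value described above for a plain
// bucket/key source, optionally pinning a source version. Assumes import
// "net/url"; escaping only the key (Amazon S3 percent-decodes the header value)
// is an assumption here, and the argument values are hypothetical.
func exampleCopySource(bucket, key, versionID string) string {
	src := bucket + "/" + url.PathEscape(key)
	if versionID != "" {
		src += "?versionId=" + versionID
	}
	return src
}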
+ // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object + // encryption with SSE-KMS. Specifying this header with a COPY action doesn’t + // affect bucket-level settings for S3 Bucket Key. For more information, see + // Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon S3 User Guide. This functionality is not supported when the + // destination bucket is a directory bucket. + BucketKeyEnabled *bool + + // Specifies the caching behavior along the request/reply chain. + CacheControl *string + + // Indicates the algorithm that you want Amazon S3 to use to create the checksum + // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. When you copy an object, if the source object has a + // checksum, that checksum value will be copied to the new object by default. If + // the CopyObject request does not include this x-amz-checksum-algorithm header, + // the checksum algorithm will be copied from the source object to the destination + // object (if it's present on the source object). You can optionally specify a + // different checksum algorithm to use with the x-amz-checksum-algorithm header. + // Unrecognized or unsupported values will respond with the HTTP status code 400 + // Bad Request . For directory buckets, when you use Amazon Web Services SDKs, + // CRC32 is the default checksum algorithm that's used for performance. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Specifies presentational information for the object. Indicates whether an + // object should be displayed in a web browser or downloaded as a file. It allows + // specifying the desired filename for the downloaded file. + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. For directory buckets, only the aws-chunked value is + // supported in this header field. + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // A standard MIME type that describes the format of the object data. + ContentType *string + + // Copies the object if its entity tag (ETag) matches the specified tag. If both + // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // 200 OK and copies the data: + // - x-amz-copy-source-if-match condition evaluates to true + // - x-amz-copy-source-if-unmodified-since condition evaluates to false + CopySourceIfMatch *string + + // Copies the object if it has been modified since the specified time. If both the + // x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers + // are present in the request and evaluate as follows, Amazon S3 returns the 412 + // Precondition Failed response code: + // - x-amz-copy-source-if-none-match condition evaluates to false + // - x-amz-copy-source-if-modified-since condition evaluates to true + CopySourceIfModifiedSince *time.Time + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. 
If both the x-amz-copy-source-if-none-match and + // x-amz-copy-source-if-modified-since headers are present in the request and + // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response + // code: + // - x-amz-copy-source-if-none-match condition evaluates to false + // - x-amz-copy-source-if-modified-since condition evaluates to true + CopySourceIfNoneMatch *string + + // Copies the object if it hasn't been modified since the specified time. If both + // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // 200 OK and copies the data: + // - x-amz-copy-source-if-match condition evaluates to true + // - x-amz-copy-source-if-unmodified-since condition evaluates to false + CopySourceIfUnmodifiedSince *time.Time + + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256 ). If the source object for the copy is stored in Amazon S3 using SSE-C, + // you must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. This functionality is not + // supported when the source object is in a directory bucket. + CopySourceSSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be the same + // one that was used when the source object was created. If the source object for + // the copy is stored in Amazon S3 using SSE-C, you must provide the necessary + // encryption information in your request so that Amazon S3 can decrypt the object + // for copying. This functionality is not supported when the source object is in a + // directory bucket. + CopySourceSSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. If the source object for the copy + // is stored in Amazon S3 using SSE-C, you must provide the necessary encryption + // information in your request so that Amazon S3 can decrypt the object for + // copying. This functionality is not supported when the source object is in a + // directory bucket. + CopySourceSSECustomerKeyMD5 *string + + // The account ID of the expected destination bucket owner. If the account ID that + // you provide does not match the actual owner of the destination bucket, the + // request fails with the HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // The account ID of the expected source bucket owner. If the account ID that you + // provide does not match the actual owner of the source bucket, the request fails + // with the HTTP status code 403 Forbidden (access denied). + ExpectedSourceBucketOwner *string + + // The date and time at which the object is no longer cacheable. + Expires *time.Time + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. + GrantFullControl *string + + // Allows grantee to read the object data and its metadata. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. + GrantRead *string + + // Allows grantee to read the object ACL. 
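// Illustrative sketch (editor's addition, not part of the generated file): a
// conditional copy using the x-amz-copy-source-if-* headers documented above,
// proceeding only while the source still carries a known ETag. Assumes imports
// "context" and "time"; the argument values are hypothetical.
func exampleConditionalCopy(ctx context.Context, client *Client, in *CopyObjectInput, etag string, since time.Time) (*CopyObjectOutput, error) {
	in.CopySourceIfMatch = ptr.String(etag)          // copy only if the source ETag still matches
	in.CopySourceIfUnmodifiedSince = ptr.Time(since) // per the rule above, If-Match wins when both headers are present
	return client.CopyObject(ctx, in)
}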
+ // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. + GrantReadACP *string + + // Allows grantee to write the ACL for the applicable object. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. + GrantWriteACP *string + + // A map of metadata to store with the object in S3. + Metadata map[string]string + + // Specifies whether the metadata is copied from the source object or replaced + // with metadata that's provided in the request. When copying an object, you can + // preserve all metadata (the default) or specify new metadata. If this header + // isn’t specified, COPY is the default behavior. General purpose bucket - For + // general purpose buckets, when you grant permissions, you can use the + // s3:x-amz-metadata-directive condition key to enforce certain metadata behavior + // when objects are uploaded. For more information, see Amazon S3 condition key + // examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) + // in the Amazon S3 User Guide. x-amz-website-redirect-location is unique to each + // object and is not copied when using the x-amz-metadata-directive header. To + // copy the value, you must specify x-amz-website-redirect-location in the request + // header. + MetadataDirective types.MetadataDirective + + // Specifies whether you want to apply a legal hold to the object copy. This + // functionality is not supported for directory buckets. + ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus + + // The Object Lock mode that you want to apply to the object copy. This + // functionality is not supported for directory buckets. + ObjectLockMode types.ObjectLockMode + + // The date and time when you want the Object Lock of the object copy to expire. + // This functionality is not supported for directory buckets. + ObjectLockRetainUntilDate *time.Time + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use when encrypting the object (for example, AES256 + // ). When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting in + // your request is different from the default encryption configuration of the + // destination bucket, the encryption setting in your request takes precedence. + // This functionality is not supported when the destination bucket is a directory + // bucket. + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded. 
Amazon S3 does not store the encryption key. The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported when the destination bucket is a directory bucket. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. This functionality is not + // supported when the destination bucket is a directory bucket. + SSECustomerKeyMD5 *string + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. This value must be explicitly + // added to specify encryption context for CopyObject requests. This functionality + // is not supported when the destination bucket is a directory bucket. + SSEKMSEncryptionContext *string + + // Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object + // encryption. All GET and PUT requests for an object protected by KMS will fail if + // they're not made via SSL or using SigV4. For information about configuring any + // of the officially supported Amazon Web Services SDKs and Amazon Web Services + // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 User Guide. This functionality is not supported when the + // destination bucket is a directory bucket. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256 , aws:kms , aws:kms:dsse ). Unrecognized or unsupported + // values won’t write a destination object and will receive a 400 Bad Request + // response. Amazon S3 automatically encrypts all new objects that are copied to an + // S3 bucket. When copying an object, if you don't specify encryption information + // in your copy request, the encryption setting of the target object is set to the + // default encryption configuration of the destination bucket. By default, all + // buckets have a base level of encryption configuration that uses server-side + // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a + // default encryption configuration that uses server-side encryption with Key + // Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with + // Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with + // customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS + // key, or a customer-provided key to encrypt the target object copy. When you + // perform a CopyObject operation, if you want to use a different type of + // encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting in + // your request is different from the default encryption configuration of the + // destination bucket, the encryption setting in your request takes precedence. + // With server-side encryption, Amazon S3 encrypts your data as it writes your data + // to disks in its data centers and decrypts the data when you access it. 
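// Illustrative sketch (editor's addition, not part of the generated file):
// requesting SSE-KMS for the object copy so that, as described above, the
// request-level setting takes precedence over the destination bucket's default
// encryption configuration. The kmsKeyID value is hypothetical.
func exampleCopyWithSSEKMS(in *CopyObjectInput, kmsKeyID string) {
	in.ServerSideEncryption = types.ServerSideEncryptionAwsKms
	in.SSEKMSKeyId = ptr.String(kmsKeyID)
	in.BucketKeyEnabled = ptr.Bool(true) // an S3 Bucket Key reduces KMS request traffic
}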
For more + // information about server-side encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) + // in the Amazon S3 User Guide. For directory buckets, only server-side encryption + // with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + ServerSideEncryption types.ServerSideEncryption + + // If the x-amz-storage-class header is not used, the copied object will be stored + // in the STANDARD Storage Class by default. The STANDARD storage class provides + // high durability and high availability. Depending on performance needs, you can + // specify a different Storage Class. + // - Directory buckets - For directory buckets, only the S3 Express One Zone + // storage class is supported to store newly created objects. Unsupported storage + // class values won't write a destination object and will respond with the HTTP + // status code 400 Bad Request . + // - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class. + // You can use the CopyObject action to change the storage class of an object that + // is already stored in Amazon S3 by using the x-amz-storage-class header. For + // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. Before using an object as a source object for the + // copy operation, you must restore a copy of it if it meets any of the following + // conditions: + // - The storage class of the source object is GLACIER or DEEP_ARCHIVE . + // - The storage class of the source object is INTELLIGENT_TIERING and it's S3 + // Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition) + // is Archive Access or Deep Archive Access . + // For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) + // and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html) + // in the Amazon S3 User Guide. + StorageClass types.StorageClass + + // The tag-set for the object copy in the destination bucket. This value must be + // used in conjunction with the x-amz-tagging-directive if you choose REPLACE for + // the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive + // , you don't need to set the x-amz-tagging header, because the tag-set will be + // copied from the source object directly. The tag-set must be encoded as URL Query + // parameters. The default value is the empty value. Directory buckets - For + // directory buckets in a CopyObject operation, only the empty tag-set is + // supported. Any requests that attempt to write non-empty tags into directory + // buckets will receive a 501 Not Implemented status code. When the destination + // bucket is a directory bucket, you will receive a 501 Not Implemented response + // in any of the following situations: + // - When you attempt to COPY the tag-set from an S3 source object that has + // non-empty tags. + // - When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging . + // - When you don't set the x-amz-tagging-directive header and the source object + // has non-empty tags. This is because the default value of + // x-amz-tagging-directive is COPY . 
+ // Because only the empty tag-set is supported for directory buckets in a + // CopyObject operation, the following situations are allowed: + // - When you attempt to COPY the tag-set from a directory bucket source object + // that has no tags to a general purpose bucket. It copies an empty tag-set to the + // destination object. + // - When you attempt to REPLACE the tag-set of a directory bucket source object + // and set the x-amz-tagging value of the directory bucket destination object to + // empty. + // - When you attempt to REPLACE the tag-set of a general purpose bucket source + // object that has non-empty tags and set the x-amz-tagging value of the + // directory bucket destination object to empty. + // - When you attempt to REPLACE the tag-set of a directory bucket source object + // and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty value. + Tagging *string + + // Specifies whether the object tag-set is copied from the source object or + // replaced with the tag-set that's provided in the request. The default value is + // COPY . Directory buckets - For directory buckets in a CopyObject operation, + // only the empty tag-set is supported. Any requests that attempt to write + // non-empty tags into directory buckets will receive a 501 Not Implemented status + // code. When the destination bucket is a directory bucket, you will receive a 501 + // Not Implemented response in any of the following situations: + // - When you attempt to COPY the tag-set from an S3 source object that has + // non-empty tags. + // - When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging . + // - When you don't set the x-amz-tagging-directive header and the source object + // has non-empty tags. This is because the default value of + // x-amz-tagging-directive is COPY . + // Because only the empty tag-set is supported for directory buckets in a + // CopyObject operation, the following situations are allowed: + // - When you attempt to COPY the tag-set from a directory bucket source object + // that has no tags to a general purpose bucket. It copies an empty tag-set to the + // destination object. + // - When you attempt to REPLACE the tag-set of a directory bucket source object + // and set the x-amz-tagging value of the directory bucket destination object to + // empty. + // - When you attempt to REPLACE the tag-set of a general purpose bucket source + // object that has non-empty tags and set the x-amz-tagging value of the + // directory bucket destination object to empty. + // - When you attempt to REPLACE the tag-set of a directory bucket source object + // and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty value. + TaggingDirective types.TaggingDirective + + // If the destination bucket is configured as a website, redirects requests for + // this object copy to another object in the same bucket or to an external URL. + // Amazon S3 stores the value of this header in the object metadata. This value is + // unique to each object and is not copied when using the x-amz-metadata-directive + // header. Instead, you may opt to provide this header in combination with the + // x-amz-metadata-directive header. This functionality is not supported for + // directory buckets. 
+ WebsiteRedirectLocation *string + + noSmithyDocumentSerde +} + +func (in *CopyObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.DisableS3ExpressSessionAuth = ptr.Bool(true) +} + +type CopyObjectOutput struct { + + // Indicates whether the copied object uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. + BucketKeyEnabled *bool + + // Container for all response elements. + CopyObjectResult *types.CopyObjectResult + + // Version ID of the source object that was copied. This functionality is not + // supported when the source object is in a directory bucket. + CopySourceVersionId *string + + // If the object expiration is configured, the response includes this header. This + // functionality is not supported for directory buckets. + Expiration *string + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string + + // If present, indicates the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a base64-encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. This functionality is + // not supported for directory buckets. + SSEKMSEncryptionContext *string + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is + // supported. + ServerSideEncryption types.ServerSideEncryption + + // Version ID of the newly created copy. This functionality is not supported for + // directory buckets. + VersionId *string + + // Metadata pertaining to the operation's result. 
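// Illustrative sketch (editor's addition, not part of the generated file): a
// minimal end-to-end CopyObject call that reads the new object's ETag from the
// output documented above. Assumes import "context"; the client and the
// bucket/key names are hypothetical.
func exampleCopyObject(ctx context.Context, client *Client) (string, error) {
	out, err := client.CopyObject(ctx, &CopyObjectInput{
		Bucket:     ptr.String("example-destination-bucket"),
		Key:        ptr.String("reports/january-copy.pdf"),
		CopySource: ptr.String("example-source-bucket/reports/january.pdf"),
	})
	if err != nil || out.CopyObjectResult == nil {
		return "", err
	}
	return ptr.ToString(out.CopyObjectResult.ETag), nil
}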
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpCopyObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpCopyObject{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CopyObject"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpCopyObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCopyObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addCopyObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *CopyObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opCopyObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CopyObject", + } +} + +// getCopyObjectBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getCopyObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*CopyObjectInput) + if in.Bucket == nil { + return nil,
false + } + return in.Bucket, true +} +func addCopyObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getCopyObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go new file mode 100644 index 000000000..b39244bcf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go @@ -0,0 +1,338 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts +// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html) +// . Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and +// have a valid Amazon Web Services Access Key ID to authenticate requests. +// Anonymous requests are never allowed to create buckets. By creating the bucket, +// you become the bucket owner. There are two types of buckets: general purpose +// buckets and directory buckets. For more information about these bucket types, +// see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) +// in the Amazon S3 User Guide. +// - General purpose buckets - If you send your CreateBucket request to the +// s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So +// the signature calculations in Signature Version 4 must use us-east-1 as the +// Region, even if the location constraint in the request specifies another Region +// where the bucket is to be created. If you create a bucket in a Region other than +// US East (N. Virginia), your application must be able to handle 307 redirect. For +// more information, see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html) +// in the Amazon S3 User Guide. +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. 
+// +// Permissions +// - General purpose bucket permissions - In addition to the s3:CreateBucket +// permission, the following permissions are required in a policy when your +// CreateBucket request includes specific headers: +// - Access control lists (ACLs) - In your CreateBucket request, if you specify +// an access control list (ACL) and set it to public-read , public-read-write , +// authenticated-read , or if you explicitly specify any other custom ACLs, both +// s3:CreateBucket and s3:PutBucketAcl permissions are required. In your +// CreateBucket request, if you set the ACL to private , or if you don't specify +// any ACLs, only the s3:CreateBucket permission is required. +// - Object Lock - In your CreateBucket request, if you set +// x-amz-bucket-object-lock-enabled to true, the +// s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are +// required. +// - S3 Object Ownership - If your CreateBucket request includes the +// x-amz-object-ownership header, then the s3:PutBucketOwnershipControls +// permission is required. To set an ACL on a bucket as part of a CreateBucket +// request, you must explicitly set S3 Object Ownership for the bucket to a +// different value than the default, BucketOwnerEnforced . Additionally, if your +// desired bucket ACL grants public access, you must first create the bucket +// (without the bucket ACL) and then explicitly disable Block Public Access on the +// bucket before using PutBucketAcl to set the ACL. If you try to create a bucket +// with a public ACL, the request will fail. For the majority of modern use cases +// in S3, we recommend that you keep all Block Public Access settings enabled and +// keep ACLs disabled. If you would like to share data with users outside of your +// account, you can use bucket policies as needed. For more information, see +// Controlling ownership of objects and disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// and Blocking public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) +// in the Amazon S3 User Guide. +// - S3 Block Public Access - If your specific use case requires granting public +// access to your S3 resources, you can disable Block Public Access. Specifically, +// you can create a new bucket with Block Public Access enabled, then separately +// call the DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about S3 Block Public Access, see Blocking +// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - You must have the s3express:CreateBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation can +// only be performed by the Amazon Web Services account that owns the resource. For +// more information about directory bucket policies and permissions, see Amazon +// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. 
The permissions for ACLs, Object Lock, S3 Object +// Ownership, and S3 Block Public Access are not supported for directory buckets. +// For directory buckets, all Block Public Access settings are enabled at the +// bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs +// disabled). These settings can't be modified. For more information about +// permissions for creating and working with directory buckets, see Directory +// buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. For more information about supported S3 features +// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features) +// in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . The following operations are related to +// CreateBucket : +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) { + if params == nil { + params = &CreateBucketInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateBucket", params, optFns, c.addOperationCreateBucketMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateBucketOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateBucketInput struct { + + // The name of the bucket to create. General purpose buckets - For information + // about bucket naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) + // in the Amazon S3 User Guide. Directory buckets - When you use this operation + // with a directory bucket, you must use path-style requests in the format + // https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // + // This member is required. + Bucket *string + + // The canned ACL to apply to the bucket. This functionality is not supported for + // directory buckets. + ACL types.BucketCannedACL + + // The configuration information for the bucket. + CreateBucketConfiguration *types.CreateBucketConfiguration + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. This functionality is not supported for directory buckets. + GrantFullControl *string + + // Allows grantee to list the objects in the bucket. This functionality is not + // supported for directory buckets. + GrantRead *string + + // Allows grantee to read the bucket ACL. This functionality is not supported for + // directory buckets. + GrantReadACP *string + + // Allows grantee to create new objects in the bucket. For the bucket and object + // owners of existing objects, also allows deletions and overwrites of those + // objects. 
This functionality is not supported for directory buckets. + GrantWrite *string + + // Allows grantee to write the ACL for the applicable bucket. This functionality + // is not supported for directory buckets. + GrantWriteACP *string + + // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + // This functionality is not supported for directory buckets. + ObjectLockEnabledForBucket *bool + + // The container element for object ownership for a bucket's ownership controls. + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the + // bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. ObjectWriter - The uploading account will own the object if the + // object is uploaded with the bucket-owner-full-control canned ACL. + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. The bucket only accepts PUT requests that don't + // specify an ACL or specify bucket owner full control ACLs (such as the predefined + // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants + // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced + // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon + // use cases where you must control access for each object individually. For more + // information about S3 Object Ownership, see Controlling ownership of objects and + // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. Directory buckets use the bucket owner enforced setting for S3 Object + // Ownership. + ObjectOwnership types.ObjectOwnership + + noSmithyDocumentSerde +} + +func (in *CreateBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) + p.DisableAccessPoints = ptr.Bool(true) +} + +type CreateBucketOutput struct { + + // A forward slash followed by the name of the bucket. + Location *string + + // Metadata pertaining to the operation's result. 
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucket{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucket{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBucket"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addComputePayloadSHA256(stack); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketContextMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addOpCreateBucketValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucket(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addCreateBucketUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (v *CreateBucketInput) bucket() (string, bool) {
+	if v.Bucket == nil {
+		return "", false
+	}
+	return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opCreateBucket(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "CreateBucket",
+	}
+}
+
+// getCreateBucketBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getCreateBucketBucketMember(input interface{}) (*string, bool) {
+	in := input.(*CreateBucketInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func 
addCreateBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getCreateBucketBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: false, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go new file mode 100644 index 000000000..c083c32d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go @@ -0,0 +1,678 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// This action initiates a multipart upload and returns an upload ID. This upload +// ID is used to associate all of the parts in the specific multipart upload. You +// specify this upload ID in each of your subsequent upload part requests (see +// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// ). You also include this upload ID in the final request to either complete or +// abort the multipart upload request. For more information about multipart +// uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) +// in the Amazon S3 User Guide. After you initiate a multipart upload and upload +// one or more parts, to stop being charged for storing the uploaded parts, you +// must either complete or abort the multipart upload. Amazon S3 frees up the space +// used to store the parts and stops charging you for storing them only after you +// either complete or abort a multipart upload. If you have configured a lifecycle +// rule to abort incomplete multipart uploads, the created multipart upload must be +// completed within the number of days specified in the bucket lifecycle +// configuration. Otherwise, the incomplete multipart upload becomes eligible for +// an abort action and Amazon S3 aborts the multipart upload. For more information, +// see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// . +// - Directory buckets - S3 Lifecycle is not supported by directory buckets. +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +// Path-style requests are not supported. For more information, see Regional and +// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. 
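+//
+// Example (editor's illustrative sketch, not part of the generated code): the
+// initiate/upload/complete round trip described above. It assumes an existing
+// *s3.Client (client), a context.Context (ctx), a []byte payload (data), a
+// hypothetical bucket and key, and imports of bytes,
+// github.com/aws/aws-sdk-go-v2/aws, and
+// github.com/aws/aws-sdk-go-v2/service/s3/types. Error handling is elided for
+// brevity.
+//
+//	mpu, _ := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("example-key"),
+//	})
+//	// Every part carries the returned UploadId; part numbers start at 1.
+//	part, _ := client.UploadPart(ctx, &s3.UploadPartInput{
+//		Bucket:     aws.String("example-bucket"),
+//		Key:        aws.String("example-key"),
+//		UploadId:   mpu.UploadId,
+//		PartNumber: aws.Int32(1),
+//		Body:       bytes.NewReader(data),
+//	})
+//	// The final request completes the upload (AbortMultipartUpload cancels it).
+//	_, _ = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
+//		Bucket:   aws.String("example-bucket"),
+//		Key:      aws.String("example-key"),
+//		UploadId: mpu.UploadId,
+//		MultipartUpload: &types.CompletedMultipartUpload{
+//			Parts: []types.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int32(1)}},
+//		},
+//	})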
+// +// Request signing For request signing, multipart upload is just a series of +// regular requests. You initiate a multipart upload, send one or more requests to +// upload parts, and then complete the multipart upload process. You sign each +// request individually. There is nothing special about signing multipart upload +// requests. For more information about signing, see Authenticating Requests +// (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information about the permissions +// required to use the multipart upload API, see Multipart upload and permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. To perform a multipart upload with encryption by +// using an Amazon Web Services KMS key, the requester must have permission to the +// kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are +// required because Amazon S3 must decrypt and read data from the encrypted file +// parts before it completes the multipart upload. For more information, see +// Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Encryption +// - General purpose buckets - Server-side encryption is for data encryption at +// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts it when you access it. Amazon S3 automatically encrypts all new +// objects that are uploaded to an S3 bucket. When doing a multipart upload, if you +// don't specify encryption information in your request, the encryption setting of +// the uploaded parts is set to the default encryption configuration of the +// destination bucket. By default, all buckets have a base level of encryption +// configuration that uses server-side encryption with Amazon S3 managed keys +// (SSE-S3). 
If the destination bucket has a default encryption configuration that +// uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), +// or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding +// KMS key, or a customer-provided key to encrypt the uploaded parts. When you +// perform a CreateMultipartUpload operation, if you want to use a different type +// of encryption setting for the uploaded parts, you can request that Amazon S3 +// encrypts the object with a different encryption key (such as an Amazon S3 +// managed key, a KMS key, or a customer-provided key). When the encryption setting +// in your request is different from the default encryption configuration of the +// destination bucket, the encryption setting in your request takes precedence. If +// you choose to provide your own encryption key, the request headers you provide +// in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// requests must match the headers you used in the CreateMultipartUpload request. +// - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key ( +// aws/s3 ) and KMS customer managed keys stored in Key Management Service (KMS) +// – If you want Amazon Web Services to manage the keys used to encrypt data, +// specify the following headers in the request. +// - x-amz-server-side-encryption +// - x-amz-server-side-encryption-aws-kms-key-id +// - x-amz-server-side-encryption-context +// - If you specify x-amz-server-side-encryption:aws:kms , but don't provide +// x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web +// Services managed key ( aws/s3 key) in KMS to protect the data. +// - To perform a multipart upload with encryption by using an Amazon Web +// Services KMS key, the requester must have permission to the kms:Decrypt and +// kms:GenerateDataKey* actions on the key. These permissions are required +// because Amazon S3 must decrypt and read data from the encrypted file parts +// before it completes the multipart upload. For more information, see Multipart +// upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. +// - If your Identity and Access Management (IAM) user or role is in the same +// Amazon Web Services account as the KMS key, then you must have these permissions +// on the key policy. If your IAM user or role is in a different account from the +// key, then you must have the permissions on both the key policy and your IAM user +// or role. +// - All GET and PUT requests for an object protected by KMS fail if you don't +// make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), +// or Signature Version 4. For information about configuring any of the officially +// supported Amazon Web Services SDKs and Amazon Web Services CLI, see +// Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) +// in the Amazon S3 User Guide. 
For more information about server-side +// encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side +// Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. +// - Use customer-provided encryption keys (SSE-C) – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// - x-amz-server-side-encryption-customer-algorithm +// - x-amz-server-side-encryption-customer-key +// - x-amz-server-side-encryption-customer-key-MD5 For more information about +// server-side encryption with customer-provided encryption keys (SSE-C), see +// Protecting data using server-side encryption with customer-provided encryption +// keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. +// - Directory buckets -For directory buckets, only server-side encryption with +// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to CreateMultipartUpload : +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) { + if params == nil { + params = &CreateMultipartUploadInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateMultipartUpload", params, optFns, c.addOperationCreateMultipartUploadMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateMultipartUploadOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateMultipartUploadInput struct { + + // The name of the bucket where the multipart upload is initiated and where the + // object is uploaded. Directory buckets - When you use this operation with a + // directory bucket, you must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the multipart upload is to be initiated. + // + // This member is required. + Key *string + + // The canned ACL to apply to the object. Amazon S3 supports a set of predefined + // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and + // permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. By default, all objects are private. Only the owner + // has full access control. When uploading an object, you can grant access + // permissions to individual Amazon Web Services accounts or to predefined groups + // defined by Amazon S3. These permissions are then added to the access control + // list (ACL) on the new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) + // . One way to grant the permissions using the request headers is to specify a + // canned ACL with the x-amz-acl request header. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. + ACL types.ObjectCannedACL + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object + // encryption with SSE-KMS. Specifying this header with an object action doesn’t + // affect bucket-level settings for S3 Bucket Key. This functionality is not + // supported for directory buckets. + BucketKeyEnabled *bool + + // Specifies caching behavior along the request/reply chain. + CacheControl *string + + // Indicates the algorithm that you want Amazon S3 to use to create the checksum + // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Specifies presentational information for the object. + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. 
For directory buckets, only the aws-chunked value is + // supported in this header field. + ContentEncoding *string + + // The language that the content is in. + ContentLanguage *string + + // A standard MIME type describing the format of the object data. + ContentType *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // The date and time at which the object is no longer cacheable. + Expires *time.Time + + // Specify access permissions explicitly to give the grantee READ, READ_ACP, and + // WRITE_ACP permissions on the object. By default, all objects are private. Only + // the owner has full access control. When uploading an object, you can use this + // header to explicitly grant access permissions to specific Amazon Web Services + // accounts or groups. This header maps to specific permissions that Amazon S3 + // supports in an ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, + // where the type is one of the following: + // - id – if the value specified is the canonical user ID of an Amazon Web + // Services account + // - uri – if you are granting permissions to a predefined group + // - emailAddress – if the value specified is the email address of an Amazon Web + // Services account Using email addresses to specify a grantee is only supported in + // the following Amazon Web Services Regions: + // - US East (N. Virginia) + // - US West (N. California) + // - US West (Oregon) + // - Asia Pacific (Singapore) + // - Asia Pacific (Sydney) + // - Asia Pacific (Tokyo) + // - Europe (Ireland) + // - South America (São Paulo) For a list of all the Amazon S3 supported Regions + // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data and + // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. + GrantFullControl *string + + // Specify access permissions explicitly to allow grantee to read the object data + // and its metadata. By default, all objects are private. Only the owner has full + // access control. When uploading an object, you can use this header to explicitly + // grant access permissions to specific Amazon Web Services accounts or groups. + // This header maps to specific permissions that Amazon S3 supports in an ACL. For + // more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. 
// You specify each grantee as a type=value pair,
+	// where the type is one of the following:
+	// - id – if the value specified is the canonical user ID of an Amazon Web
+	// Services account
+	// - uri – if you are granting permissions to a predefined group
+	// - emailAddress – if the value specified is the email address of an Amazon Web
+	// Services account Using email addresses to specify a grantee is only supported in
+	// the following Amazon Web Services Regions:
+	// - US East (N. Virginia)
+	// - US West (N. California)
+	// - US West (Oregon)
+	// - Asia Pacific (Singapore)
+	// - Asia Pacific (Sydney)
+	// - Asia Pacific (Tokyo)
+	// - Europe (Ireland)
+	// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
+	// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+	// in the Amazon Web Services General Reference.
+	// For example, the following x-amz-grant-read header grants the Amazon Web
+	// Services accounts identified by account IDs permissions to read object data and
+	// its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
+	// - This functionality is not supported for directory buckets.
+	// - This functionality is not supported for Amazon S3 on Outposts.
+	GrantRead *string
+
+	// Specify access permissions explicitly to allow the grantee to read the object
+	// ACL. By default, all objects are private. Only the owner has full access control.
+	// When uploading an object, you can use this header to explicitly grant access
+	// permissions to specific Amazon Web Services accounts or groups. This header maps
+	// to specific permissions that Amazon S3 supports in an ACL. For more information,
+	// see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+	// in the Amazon S3 User Guide. You specify each grantee as a type=value pair,
+	// where the type is one of the following:
+	// - id – if the value specified is the canonical user ID of an Amazon Web
+	// Services account
+	// - uri – if you are granting permissions to a predefined group
+	// - emailAddress – if the value specified is the email address of an Amazon Web
+	// Services account Using email addresses to specify a grantee is only supported in
+	// the following Amazon Web Services Regions:
+	// - US East (N. Virginia)
+	// - US West (N. California)
+	// - US West (Oregon)
+	// - Asia Pacific (Singapore)
+	// - Asia Pacific (Sydney)
+	// - Asia Pacific (Tokyo)
+	// - Europe (Ireland)
+	// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
+	// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+	// in the Amazon Web Services General Reference.
+	// For example, the following x-amz-grant-read header grants the Amazon Web
+	// Services accounts identified by account IDs permissions to read object data and
+	// its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
+	// - This functionality is not supported for directory buckets.
+	// - This functionality is not supported for Amazon S3 on Outposts.
+	GrantReadACP *string
+
+	// Specify access permissions explicitly to allow the grantee to write the ACL
+	// for the applicable object. By default, all objects are private.
+	// Only the owner has full access control.
When uploading an object, you can use + // this header to explicitly grant access permissions to specific Amazon Web + // Services accounts or groups. This header maps to specific permissions that + // Amazon S3 supports in an ACL. For more information, see Access Control List + // (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, + // where the type is one of the following: + // - id – if the value specified is the canonical user ID of an Amazon Web + // Services account + // - uri – if you are granting permissions to a predefined group + // - emailAddress – if the value specified is the email address of an Amazon Web + // Services account Using email addresses to specify a grantee is only supported in + // the following Amazon Web Services Regions: + // - US East (N. Virginia) + // - US West (N. California) + // - US West (Oregon) + // - Asia Pacific (Singapore) + // - Asia Pacific (Sydney) + // - Asia Pacific (Tokyo) + // - Europe (Ireland) + // - South America (São Paulo) For a list of all the Amazon S3 supported Regions + // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data and + // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. + GrantWriteACP *string + + // A map of metadata to store with the object in S3. + Metadata map[string]string + + // Specifies whether you want to apply a legal hold to the uploaded object. This + // functionality is not supported for directory buckets. + ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus + + // Specifies the Object Lock mode that you want to apply to the uploaded object. + // This functionality is not supported for directory buckets. + ObjectLockMode types.ObjectLockMode + + // Specifies the date and time when you want the Object Lock to expire. This + // functionality is not supported for directory buckets. + ObjectLockRetainUntilDate *time.Time + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded; Amazon S3 does not store the encryption key. 
The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported for directory buckets. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. Amazon S3 uses this header for a message integrity check + // to ensure that the encryption key was transmitted without error. This + // functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. This functionality is not + // supported for directory buckets. + SSEKMSEncryptionContext *string + + // Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption + // customer managed key to use for object encryption. This functionality is not + // supported for directory buckets. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + ServerSideEncryption types.ServerSideEncryption + + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high + // availability. Depending on performance needs, you can specify a different + // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. + // - For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + StorageClass types.StorageClass + + // The tag-set for the object. The tag-set must be encoded as URL Query + // parameters. This functionality is not supported for directory buckets. + Tagging *string + + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. Amazon S3 stores the + // value of this header in the object metadata. This functionality is not supported + // for directory buckets. + WebsiteRedirectLocation *string + + noSmithyDocumentSerde +} + +func (in *CreateMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + +type CreateMultipartUploadOutput struct { + + // If the bucket has a lifecycle rule configured with an action to abort + // incomplete multipart uploads and the prefix in the lifecycle rule matches the + // object name in the request, the response includes this header. The header + // indicates when the initiated multipart upload becomes eligible for an abort + // operation. For more information, see Aborting Incomplete Multipart Uploads + // Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon S3 User Guide. The response also includes the x-amz-abort-rule-id + // header that provides the ID of the lifecycle configuration rule that defines the + // abort action. This functionality is not supported for directory buckets. 
+ AbortDate *time.Time + + // This header is returned along with the x-amz-abort-date header. It identifies + // the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. This functionality is not supported for directory + // buckets. + AbortRuleId *string + + // The name of the bucket to which the multipart upload was initiated. Does not + // return the access point ARN or access point alias if used. Access points are not + // supported by directory buckets. + Bucket *string + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. + BucketKeyEnabled *bool + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Object key for which the multipart upload was initiated. + Key *string + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string + + // If present, indicates the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a base64-encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. This functionality is + // not supported for directory buckets. + SSEKMSEncryptionContext *string + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + ServerSideEncryption types.ServerSideEncryption + + // ID for the initiated multipart upload. + UploadId *string + + // Metadata pertaining to the operation's result. 
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsRestxml_serializeOpCreateMultipartUpload{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateMultipartUpload{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "CreateMultipartUpload"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addComputePayloadSHA256(stack); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketContextMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addOpCreateMultipartUploadValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateMultipartUpload(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addCreateMultipartUploadUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addSetCreateMPUChecksumAlgorithm(stack); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (v *CreateMultipartUploadInput) bucket() (string, bool) {
+	if v.Bucket == nil {
+		return "", false
+	}
+	return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opCreateMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "CreateMultipartUpload",
+	}
+}
+
+// getCreateMultipartUploadBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func 
getCreateMultipartUploadBucketMember(input interface{}) (*string, bool) { + in := input.(*CreateMultipartUploadInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addCreateMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getCreateMultipartUploadBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go new file mode 100644 index 000000000..e2d5a007d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go @@ -0,0 +1,260 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a session that establishes temporary security credentials to support +// fast authentication and authorization for the Zonal endpoint APIs on directory +// buckets. For more information about Zonal endpoint APIs that include the +// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html) +// in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory +// bucket, use the CreateSession API operation. Specifically, you grant +// s3express:CreateSession permission to a bucket in a bucket policy or an IAM +// identity-based policy. Then, you use IAM credentials to make the CreateSession +// API request on the bucket, which returns temporary security credentials that +// include the access key ID, secret access key, session token, and expiration. +// These credentials have associated permissions to access the Zonal endpoint APIs. +// After the session is created, you don’t need to use other policies to grant +// permissions to each Zonal endpoint API individually. Instead, in your Zonal +// endpoint API requests, you sign your requests by applying the temporary security +// credentials of the session to the request headers and following the SigV4 +// protocol for authentication. You also apply the session token to the +// x-amz-s3session-token request header for authorization. Temporary security +// credentials are scoped to the bucket and expire after 5 minutes. After the +// expiration time, any calls that you make with those credentials will fail. You +// must use IAM credentials again to make a CreateSession API request that +// generates a new set of temporary credentials for use. Temporary credentials +// cannot be extended or refreshed beyond the original specified interval. 
If you +// use Amazon Web Services SDKs, SDKs handle the session token refreshes +// automatically to avoid service interruptions when a session expires. We +// recommend that you use the Amazon Web Services SDKs to initiate and manage +// requests to the CreateSession API. For more information, see Performance +// guidelines and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication) +// in the Amazon S3 User Guide. +// - You must make requests for this API operation to the Zonal endpoint. These +// endpoints support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests +// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject +// API operation doesn't use the temporary security credentials returned from the +// CreateSession API operation for authentication and authorization. For +// information about authentication and authorization of the CopyObject API +// operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// . +// - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket +// API operation doesn't use the temporary security credentials returned from the +// CreateSession API operation for authentication and authorization. For +// information about authentication and authorization of the HeadBucket API +// operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) +// . +// +// Permissions To obtain temporary security credentials, you must create a bucket +// policy or an IAM identity-based policy that grants s3express:CreateSession +// permission to the bucket. In a policy, you can have the s3express:SessionMode +// condition key to control who can create a ReadWrite or ReadOnly session. For +// more information about ReadWrite or ReadOnly sessions, see +// x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters) +// . For example policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint +// APIs, the bucket policy should also grant both accounts the +// s3express:CreateSession permission. HTTP Host header syntax Directory buckets - +// The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . 
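+//
+// Example (editor's illustrative sketch, not part of the generated code):
+// requesting read-only session credentials for a directory bucket. It assumes
+// an existing *s3.Client (client), a context.Context (ctx), the documentation's
+// placeholder bucket name, and imports of github.com/aws/aws-sdk-go-v2/aws and
+// github.com/aws/aws-sdk-go-v2/service/s3/types. The returned credentials
+// expire after 5 minutes, as described above.
+//
+//	out, err := client.CreateSession(ctx, &s3.CreateSessionInput{
+//		Bucket:      aws.String("DOC-EXAMPLE-BUCKET--usw2-az1--x-s3"),
+//		SessionMode: types.SessionModeReadOnly,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	// out.Credentials carries AccessKeyId, SecretAccessKey, SessionToken, and
+//	// Expiration for signing subsequent Zonal endpoint requests.
+//	creds := out.Credentials
+//	_ = creds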
+func (c *Client) CreateSession(ctx context.Context, params *CreateSessionInput, optFns ...func(*Options)) (*CreateSessionOutput, error) {
+	if params == nil {
+		params = &CreateSessionInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "CreateSession", params, optFns, c.addOperationCreateSessionMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*CreateSessionOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type CreateSessionInput struct {
+
+	// The name of the bucket that you create a session for.
+	//
+	// This member is required.
+	Bucket *string
+
+	// Specifies the mode of the session that will be created, either ReadWrite or
+	// ReadOnly . By default, a ReadWrite session is created. A ReadWrite session is
+	// capable of executing all the Zonal endpoint APIs on a directory bucket. A
+	// ReadOnly session is constrained to execute the following Zonal endpoint APIs:
+	// GetObject , HeadObject , ListObjectsV2 , GetObjectAttributes , ListParts , and
+	// ListMultipartUploads .
+	SessionMode types.SessionMode
+
+	noSmithyDocumentSerde
+}
+
+func (in *CreateSessionInput) bindEndpointParams(p *EndpointParameters) {
+	p.Bucket = in.Bucket
+	p.DisableS3ExpressSessionAuth = ptr.Bool(true)
+}
+
+type CreateSessionOutput struct {
+
+	// The established temporary security credentials for the created session.
+	//
+	// This member is required.
+	Credentials *types.SessionCredentials
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsRestxml_serializeOpCreateSession{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateSession{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "CreateSession"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addComputePayloadSHA256(stack); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketContextMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addOpCreateSessionValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateSession(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if 
err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addCreateSessionUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (v *CreateSessionInput) bucket() (string, bool) {
+	if v.Bucket == nil {
+		return "", false
+	}
+	return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opCreateSession(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "CreateSession",
+	}
+}
+
+// getCreateSessionBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getCreateSessionBucketMember(input interface{}) (*string, bool) {
+	in := input.(*CreateSessionInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addCreateSessionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getCreateSessionBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go
new file mode 100644
index 000000000..30e1381bd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go
@@ -0,0 +1,261 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/ptr"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes the S3 bucket. All objects (including all object versions and delete
+// markers) in the bucket must be deleted before the bucket itself can be deleted.
+// - Directory buckets - If multipart uploads in a directory bucket are in
+// progress, you can't delete the bucket until all the in-progress multipart
+// uploads are aborted or completed.
+// - Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format
+// https://s3express-control.region_code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported.
For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// - General purpose bucket permissions - You must have the s3:DeleteBucket +// permission on the specified bucket in a policy. +// - Directory bucket permissions - You must have the s3express:DeleteBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation can +// only be performed by the Amazon Web Services account that owns the resource. For +// more information about directory bucket policies and permissions, see Amazon +// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . The following operations are related to +// DeleteBucket : +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) { + if params == nil { + params = &DeleteBucketInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucket", params, optFns, c.addOperationDeleteBucketMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketInput struct { + + // Specifies the bucket being deleted. Directory buckets - When you use this + // operation with a directory bucket, you must use path-style requests in the + // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). For directory buckets, this header + // is not supported in this API operation. If you specify this header, the request + // fails with the HTTP status code 501 Not Implemented . + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucket{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucket{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucket"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBucketValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucket(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucket(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucket", + } +} + +// getDeleteBucketBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getDeleteBucketBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func 
addDeleteBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: false, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignDeleteBucket is used to generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignDeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &DeleteBucketInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "DeleteBucket", params, clientOptFns, + c.client.addOperationDeleteBucketMiddlewares, + presignConverter(options).convertToPresignMiddleware, + addDeleteBucketPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addDeleteBucketPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go new file mode 100644 index 000000000..0033825a0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go @@ -0,0 +1,210 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Deletes an analytics +// configuration for the bucket (specified by the analytics configuration ID). To +// use this operation, you must have permissions to perform the +// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about the Amazon S3 analytics feature, see Amazon S3 +// Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// . 
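Note how addDeleteBucketPayloadAsUnsigned strips the payload-hash middlewares before the presign conversion: a presigned URL cannot carry a computed body hash. A sketch of minting such a URL; the bucket name and the 15-minute expiry are arbitrary choices:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	presigner := s3.NewPresignClient(s3.NewFromConfig(cfg))

	req, err := presigner.PresignDeleteBucket(ctx, &s3.DeleteBucketInput{
		Bucket: aws.String("example-bucket"), // placeholder
	}, s3.WithPresignExpires(15*time.Minute))
	if err != nil {
		log.Fatal(err)
	}
	// Whoever holds this URL can issue the DELETE until it expires.
	fmt.Println(req.Method, req.URL)
}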
The following operations are related to DeleteBucketAnalyticsConfiguration : +// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketAnalyticsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketAnalyticsConfiguration", params, optFns, c.addOperationDeleteBucketAnalyticsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketAnalyticsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketAnalyticsConfigurationInput struct { + + // The name of the bucket from which an analytics configuration is deleted. + // + // This member is required. + Bucket *string + + // The ID that identifies the analytics configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketAnalyticsConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketAnalyticsConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketAnalyticsConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketAnalyticsConfiguration", + } +} + +// getDeleteBucketAnalyticsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the 
input has +// a modeled bucket name, +func getDeleteBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketAnalyticsConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketAnalyticsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go new file mode 100644 index 000000000..d465826fb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go @@ -0,0 +1,199 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Deletes the cors +// configuration information set for the bucket. To use this operation, you must +// have permission to perform the s3:PutBucketCORS action. The bucket owner has +// this permission by default and can grant this permission to others. For +// information about cors , see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon S3 User Guide. Related Resources +// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) { + if params == nil { + params = &DeleteBucketCorsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketCors", params, optFns, c.addOperationDeleteBucketCorsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketCorsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketCorsInput struct { + + // Specifies the bucket whose cors configuration is being deleted. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketCorsOutput struct { + // Metadata pertaining to the operation's result. 
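For the analytics operation just defined, both Bucket and Id are required and enforced by the validation middleware. One plausible calling pattern, assuming a client built as in the earlier sketches, inspects failures through smithy's generic APIError interface; the bucket and configuration ID are placeholders:

package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

func deleteAnalyticsConfig(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucketAnalyticsConfiguration(ctx, &s3.DeleteBucketAnalyticsConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Id:     aws.String("report-1"),       // placeholder configuration ID
	})
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		// ErrorCode carries the service's error identifier, e.g. NoSuchBucket.
		return fmt.Errorf("delete failed: %s: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
	}
	return err
}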
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketCors{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketCors{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketCors"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketCors(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketCorsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketCorsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketCors", + } +} + +// getDeleteBucketCorsBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getDeleteBucketCorsBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketCorsInput) + if in.Bucket == nil { + return nil, false + } + 
return in.Bucket, true +} +func addDeleteBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketCorsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go new file mode 100644 index 000000000..7be8c4759 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go @@ -0,0 +1,206 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. This implementation of +// the DELETE action resets the default encryption for the bucket as server-side +// encryption with Amazon S3 managed keys (SSE-S3). For information about the +// bucket default encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon S3 User Guide. To use this operation, you must have permissions to +// perform the s3:PutEncryptionConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. The following operations are related to +// DeleteBucketEncryption : +// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) { + if params == nil { + params = &DeleteBucketEncryptionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketEncryption", params, optFns, c.addOperationDeleteBucketEncryptionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketEncryptionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketEncryptionInput struct { + + // The name of the bucket containing the server-side encryption configuration to + // delete. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. 
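The ExpectedBucketOwner fields documented throughout these inputs fail the request with HTTP 403 on a mismatch. A sketch that sends the header on the cors delete defined above and detects that case through the transport-level response error; the account ID and bucket are illustrative only:

package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func deleteCorsChecked(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucketCors(ctx, &s3.DeleteBucketCorsInput{
		Bucket:              aws.String("example-bucket"), // placeholder
		ExpectedBucketOwner: aws.String("111122223333"),   // placeholder account ID
	})
	var re *awshttp.ResponseError
	if errors.As(err, &re) && re.HTTPStatusCode() == 403 {
		return fmt.Errorf("access denied: bucket is not owned by the expected account")
	}
	return err
}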
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketEncryptionOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketEncryption{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketEncryption{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketEncryption"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketEncryption(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketEncryptionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketEncryptionInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func 
newServiceMetadataMiddleware_opDeleteBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketEncryption", + } +} + +// getDeleteBucketEncryptionBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketEncryptionBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketEncryptionInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketEncryptionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go new file mode 100644 index 000000000..734d23b04 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go @@ -0,0 +1,211 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Deletes the S3 +// Intelligent-Tiering configuration from the specified bucket. The S3 +// Intelligent-Tiering storage class is designed to optimize storage costs by +// automatically moving data to the most cost-effective storage access tier, +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput access +// tiers. To get the lowest storage cost on data that can be accessed in minutes to +// hours, you can choose to activate additional archiving capabilities. The S3 +// Intelligent-Tiering storage class is the ideal storage class for data with +// unknown, changing, or unpredictable access patterns, independent of object size +// or retention period. If the size of an object is less than 128 KB, it is not +// monitored and not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. For more information, see Storage class for +// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) +// . 
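As the DeleteBucketEncryption documentation above stresses, removing the configuration does not decrypt anything; it resets the bucket default to SSE-S3. The call itself is a one-liner (bucket name assumed, client constructed as before):

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// resetDefaultEncryption drops the bucket's custom default encryption,
// falling back to Amazon S3 managed keys (SSE-S3).
func resetDefaultEncryption(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	return err
}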
Operations related to DeleteBucketIntelligentTieringConfiguration include: +// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +func (c *Client) DeleteBucketIntelligentTieringConfiguration(ctx context.Context, params *DeleteBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketIntelligentTieringConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketIntelligentTieringConfiguration", params, optFns, c.addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketIntelligentTieringConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketIntelligentTieringConfigurationInput struct { + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // This member is required. + Bucket *string + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // This member is required. + Id *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketIntelligentTieringConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketIntelligentTieringConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketIntelligentTieringConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketIntelligentTieringConfiguration", + } +} + +// getDeleteBucketIntelligentTieringConfigurationBucketMember returns 
a pointer to +// string denoting a provided bucket member valueand a boolean indicating if the +// input has a modeled bucket name, +func getDeleteBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketIntelligentTieringConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketIntelligentTieringConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go new file mode 100644 index 000000000..3b8d81a43 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go @@ -0,0 +1,208 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Deletes an inventory +// configuration (identified by the inventory ID) from the bucket. To use this +// operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner can +// grant this permission to others. For more information about permissions, see +// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// . 
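Every generated operation, including the Intelligent-Tiering delete defined just above, accepts trailing functional options, so client configuration can be overridden for a single request. A sketch that pins the region per call; the region, bucket, and ID are all illustrative:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func deleteTieringConfig(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucketIntelligentTieringConfiguration(ctx,
		&s3.DeleteBucketIntelligentTieringConfigurationInput{
			Bucket: aws.String("example-bucket"), // placeholder
			Id:     aws.String("tiering-1"),      // placeholder configuration ID
		},
		// Per-call override: applies to this request only.
		func(o *s3.Options) { o.Region = "us-west-2" },
	)
	return err
}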
Operations related to DeleteBucketInventoryConfiguration include: +// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketInventoryConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketInventoryConfiguration", params, optFns, c.addOperationDeleteBucketInventoryConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketInventoryConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketInventoryConfigurationInput struct { + + // The name of the bucket containing the inventory configuration to delete. + // + // This member is required. + Bucket *string + + // The ID used to identify the inventory configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketInventoryConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketInventoryConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketInventoryConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketInventoryConfiguration", + } +} + +// getDeleteBucketInventoryConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the 
input has +// a modeled bucket name, +func getDeleteBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketInventoryConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketInventoryConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go new file mode 100644 index 000000000..88928b284 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go @@ -0,0 +1,205 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Deletes the lifecycle +// configuration from the specified bucket. Amazon S3 removes all the lifecycle +// configuration rules in the lifecycle subresource associated with the bucket. +// Your objects never expire, and Amazon S3 no longer automatically deletes any +// objects on the basis of rules contained in the deleted lifecycle configuration. +// To use this operation, you must have permission to perform the +// s3:PutLifecycleConfiguration action. By default, the bucket owner has this +// permission and the bucket owner can grant this permission to others. There is +// usually some time lag before lifecycle configuration deletion is fully +// propagated to all the Amazon S3 systems. For more information about the object +// expiration, see Elements to Describe Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions) +// . Related actions include: +// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) { + if params == nil { + params = &DeleteBucketLifecycleInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketLifecycle", params, optFns, c.addOperationDeleteBucketLifecycleMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketLifecycleOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketLifecycleInput struct { + + // The bucket name of the lifecycle to delete. 
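Inventory configurations are keyed by ID, so clearing them all means paging through ListBucketInventoryConfigurations first. A sketch of that loop, under the assumption that the caller really wants every configuration gone (bucket name hypothetical):

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func deleteAllInventoryConfigs(ctx context.Context, client *s3.Client, bucket string) error {
	var token *string
	for {
		page, err := client.ListBucketInventoryConfigurations(ctx, &s3.ListBucketInventoryConfigurationsInput{
			Bucket:            aws.String(bucket),
			ContinuationToken: token,
		})
		if err != nil {
			return err
		}
		for _, cfg := range page.InventoryConfigurationList {
			if _, err := client.DeleteBucketInventoryConfiguration(ctx, &s3.DeleteBucketInventoryConfigurationInput{
				Bucket: aws.String(bucket),
				Id:     cfg.Id,
			}); err != nil {
				return err
			}
		}
		if !aws.ToBool(page.IsTruncated) {
			return nil
		}
		token = page.NextContinuationToken
	}
}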
+ // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketLifecycleInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketLifecycleOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketLifecycle{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketLifecycle{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketLifecycle"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketLifecycle(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketLifecycleUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketLifecycleInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", 
false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketLifecycle(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketLifecycle", + } +} + +// getDeleteBucketLifecycleBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketLifecycleBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketLifecycleInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketLifecycleUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketLifecycleBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go new file mode 100644 index 000000000..21384351f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go @@ -0,0 +1,213 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Deletes a metrics +// configuration for the Amazon CloudWatch request metrics (specified by the +// metrics configuration ID) from the bucket. Note that this doesn't include the +// daily storage metrics. To use this operation, you must have permissions to +// perform the s3:PutMetricsConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about CloudWatch request metrics for Amazon S3, see +// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// . 
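The lifecycle documentation above notes a propagation lag: after a successful delete, reads may briefly still see the old rules, so an immediate GetBucketLifecycleConfiguration should not be treated as authoritative. The delete itself, with an assumed bucket name:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// removeLifecycle deletes every lifecycle rule on the bucket; objects then
// never expire automatically. Propagation across S3 systems can lag briefly.
func removeLifecycle(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucketLifecycle(ctx, &s3.DeleteBucketLifecycleInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	return err
}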
The following operations are related to DeleteBucketMetricsConfiguration : +// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketMetricsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetricsConfiguration", params, optFns, c.addOperationDeleteBucketMetricsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketMetricsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketMetricsConfigurationInput struct { + + // The name of the bucket containing the metrics configuration to delete. + // + // This member is required. + Bucket *string + + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketMetricsConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
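+//
+// Illustrative call shape for this operation (a minimal sketch, not part of
+// the generated code; client construction and the bucket/ID values are
+// assumed placeholders):
+//
+//	cfg, err := config.LoadDefaultConfig(ctx) // github.com/aws/aws-sdk-go-v2/config
+//	if err != nil {
+//		return err
+//	}
+//	client := s3.NewFromConfig(cfg)
+//	_, err = client.DeleteBucketMetricsConfiguration(ctx, &s3.DeleteBucketMetricsConfigurationInput{
+//		Bucket: aws.String("example-bucket"),     // assumed name
+//		Id:     aws.String("example-metrics-id"), // assumed configuration ID
+//	})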
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketMetricsConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketMetricsConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketMetricsConfiguration", + } +} + +// getDeleteBucketMetricsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled 
bucket name, +func getDeleteBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketMetricsConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketMetricsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go new file mode 100644 index 000000000..4beac6b09 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go @@ -0,0 +1,199 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Removes OwnershipControls +// for an Amazon S3 bucket. To use this operation, you must have the +// s3:PutBucketOwnershipControls permission. For more information about Amazon S3 +// permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html) +// . The following operations are related to DeleteBucketOwnershipControls : +// - GetBucketOwnershipControls +// - PutBucketOwnershipControls +func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) { + if params == nil { + params = &DeleteBucketOwnershipControlsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketOwnershipControls", params, optFns, c.addOperationDeleteBucketOwnershipControlsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketOwnershipControlsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketOwnershipControlsInput struct { + + // The Amazon S3 bucket whose OwnershipControls you want to delete. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
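+//
+// For example (a sketch; the account ID shown is a placeholder, not a real
+// value), pinning the expected owner guards against acting on a similarly
+// named bucket in another account:
+//
+//	_, err := client.DeleteBucketOwnershipControls(ctx, &s3.DeleteBucketOwnershipControlsInput{
+//		Bucket:              aws.String("example-bucket"),
+//		ExpectedBucketOwner: aws.String("111122223333"), // assumed account ID
+//	})
+//	// A mismatched owner surfaces as an API error with HTTP status 403.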
+ ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketOwnershipControlsOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketOwnershipControls"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketOwnershipControlsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { + return 
&awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketOwnershipControls", + } +} + +// getDeleteBucketOwnershipControlsBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getDeleteBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketOwnershipControlsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketOwnershipControlsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go new file mode 100644 index 000000000..b8e1f56a1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go @@ -0,0 +1,239 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the policy of a specified bucket. Directory buckets - For directory +// buckets, you must make requests for this API operation to the Regional endpoint. +// These endpoints support path-style requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions If you are using an identity other than +// the root user of the Amazon Web Services account that owns the bucket, the +// calling identity must both have the DeleteBucketPolicy permissions on the +// specified bucket and belong to the bucket owner's account in order to use this +// operation. If you don't have DeleteBucketPolicy permissions, Amazon S3 returns +// a 403 Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. To ensure that bucket owners don't +// inadvertently lock themselves out of their own buckets, the root principal in a +// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , +// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket +// policy explicitly denies the root principal's access. Bucket owner root +// principals can only be blocked from performing these API actions by VPC endpoint +// policies and Amazon Web Services Organizations policies. 
+// - General purpose bucket permissions - The s3:DeleteBucketPolicy permission is +// required in a policy. For more information about general purpose buckets bucket +// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:DeleteBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web Services +// account that owns the resource. For more information about directory bucket +// policies and permissions, see Amazon Web Services Identity and Access +// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . The following operations are related to +// DeleteBucketPolicy +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) { + if params == nil { + params = &DeleteBucketPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketPolicy", params, optFns, c.addOperationDeleteBucketPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketPolicyInput struct { + + // The bucket name. Directory buckets - When you use this operation with a + // directory bucket, you must use path-style requests in the format + // https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). For directory buckets, this header + // is not supported in this API operation. If you specify this header, the request + // fails with the HTTP status code 501 Not Implemented . + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketPolicyOutput struct { + // Metadata pertaining to the operation's result. 
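+//
+// Illustrative call shape (a sketch under assumed names; a 403 from a
+// missing s3:DeleteBucketPolicy permission can be inspected through the
+// smithy-go APIError interface):
+//
+//	_, err := client.DeleteBucketPolicy(ctx, &s3.DeleteBucketPolicyInput{
+//		Bucket: aws.String("example-bucket"), // assumed name
+//	})
+//	var apiErr smithy.APIError // github.com/aws/smithy-go
+//	if errors.As(err, &apiErr) {
+//		log.Printf("delete bucket policy failed: %s", apiErr.ErrorCode())
+//	}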
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketPolicy{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketPolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketPolicyUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketPolicyInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketPolicy", + } +} + +// getDeleteBucketPolicyBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketPolicyBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketPolicyInput) + if in.Bucket == nil { + 
return nil, false + } + return in.Bucket, true +} +func addDeleteBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketPolicyBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go new file mode 100644 index 000000000..9fdc6bcf3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go @@ -0,0 +1,203 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Deletes the replication +// configuration from the bucket. To use this operation, you must have permissions +// to perform the s3:PutReplicationConfiguration action. The bucket owner has +// these permissions by default and can grant it to others. For more information +// about permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . It can take a while for the deletion of a replication configuration to fully +// propagate. For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 User Guide. The following operations are related to +// DeleteBucketReplication : +// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) { + if params == nil { + params = &DeleteBucketReplicationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketReplication", params, optFns, c.addOperationDeleteBucketReplicationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketReplicationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketReplicationInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
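+//
+// For example (a sketch with assumed placeholder values); per the operation
+// documentation above, deletion of a replication configuration can take a
+// while to propagate, so an immediate GetBucketReplication may still return
+// the old configuration:
+//
+//	_, err := client.DeleteBucketReplication(ctx, &s3.DeleteBucketReplicationInput{
+//		Bucket:              aws.String("example-bucket"),
+//		ExpectedBucketOwner: aws.String("111122223333"),
+//	})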
+ ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketReplicationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketReplication{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketReplication{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketReplication"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketReplication(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketReplicationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketReplicationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + 
OperationName: "DeleteBucketReplication", + } +} + +// getDeleteBucketReplicationBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketReplicationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketReplicationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketReplicationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go new file mode 100644 index 000000000..ae737d40a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go @@ -0,0 +1,198 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Deletes the tags from the +// bucket. To use this operation, you must have permission to perform the +// s3:PutBucketTagging action. By default, the bucket owner has this permission and +// can grant this permission to others. The following operations are related to +// DeleteBucketTagging : +// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) { + if params == nil { + params = &DeleteBucketTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketTagging", params, optFns, c.addOperationDeleteBucketTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketTaggingInput struct { + + // The bucket that has the tag set to be removed. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketTaggingOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketTagging{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketTagging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketTaggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketTagging", + } +} + +// getDeleteBucketTaggingBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketTaggingBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketTaggingInput) + if 
in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketTaggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go new file mode 100644 index 000000000..425936dfa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go @@ -0,0 +1,205 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. This action removes the +// website configuration for a bucket. Amazon S3 returns a 200 OK response upon +// successfully deleting a website configuration on the specified bucket. You will +// get a 200 OK response if the website configuration you are trying to delete +// does not exist on the bucket. Amazon S3 returns a 404 response if the bucket +// specified in the request does not exist. This DELETE action requires the +// S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete +// the website configuration attached to a bucket. However, bucket owners can grant +// other users permission to delete the website configuration by writing a bucket +// policy granting them the S3:DeleteBucketWebsite permission. For more +// information about hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) +// . The following operations are related to DeleteBucketWebsite : +// - GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) +// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) { + if params == nil { + params = &DeleteBucketWebsiteInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketWebsite", params, optFns, c.addOperationDeleteBucketWebsiteMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketWebsiteOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketWebsiteInput struct { + + // The bucket name for which you want to remove the website configuration. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketWebsiteOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketWebsite{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketWebsite{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketWebsite"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketWebsite(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketWebsiteUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketWebsiteInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketWebsite(region string) 
*awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketWebsite", + } +} + +// getDeleteBucketWebsiteBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketWebsiteBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketWebsiteInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketWebsiteBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go new file mode 100644 index 000000000..c1e5ff73a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go @@ -0,0 +1,372 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes an object from a bucket. The behavior depends on the bucket's +// versioning state: +// +// - If bucket versioning is not enabled, the operation permanently deletes the +// object. +// +// - If bucket versioning is enabled, the operation inserts a delete marker, +// which becomes the current version of the object. To permanently delete an object +// in a versioned bucket, you must include the object’s versionId in the request. +// For more information about versioning-enabled buckets, see Deleting object +// versions from a versioning-enabled bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html) +// . +// +// - If bucket versioning is suspended, the operation removes the object that +// has a null versionId , if there is one, and inserts a delete marker that +// becomes the current version of the object. If there isn't an object with a null +// versionId , and all versions of the object have a versionId , Amazon S3 does +// not remove the object and only inserts a delete marker. To permanently delete an +// object that has a versionId , you must include the object’s versionId in the +// request. For more information about versioning-suspended buckets, see +// Deleting objects from versioning-suspended buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html) +// . +// +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID is +// supported by directory buckets. 
You can only specify null to the versionId +// query parameter in the request. +// +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +// Path-style requests are not supported. For more information, see Regional and +// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// To remove a specific version, you must use the versionId query parameter. Using +// this query parameter permanently deletes the version. If the object deleted is a +// delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. +// If the object you want to delete is in a bucket where the bucket versioning +// configuration is MFA Delete enabled, you must include the x-amz-mfa request +// header in the DELETE versionId request. Requests that include x-amz-mfa must +// use HTTPS. For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html) +// in the Amazon S3 User Guide. To see sample requests that use versioning, see +// Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete) +// . Directory buckets - MFA delete is not supported by directory buckets. You can +// delete objects by explicitly calling DELETE Object or calling ( +// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// ) to enable Amazon S3 to remove them for you. If you want to block users or +// accounts from removing or deleting objects from your bucket, you must deny them +// the s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration +// actions. Directory buckets - S3 Lifecycle is not supported by directory buckets. +// Permissions +// - General purpose bucket permissions - The following permissions are required +// in your policies when your DeleteObjects request includes specific headers. +// - s3:DeleteObject - To delete an object from a bucket, you must always have +// the s3:DeleteObject permission. +// - s3:DeleteObjectVersion - To delete a specific version of an object from a +// versioning-enabled bucket, you must have the s3:DeleteObjectVersion +// permission. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . 
+// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following action is +// related to DeleteObject : +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) { + if params == nil { + params = &DeleteObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteObject", params, optFns, c.addOperationDeleteObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteObjectInput struct { + + // The bucket name of the bucket containing the object. Directory buckets - When + // you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Key name of the object to delete. + // + // This member is required. + Key *string + + // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to + // process this operation. To use this header, you must have the + // s3:BypassGovernanceRetention permission. This functionality is not supported for + // directory buckets. + BypassGovernanceRetention *bool + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // The concatenation of the authentication device's serial number, a space, and + // the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. This functionality is not supported for directory buckets. + MFA *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Version ID used to reference a specific version of the object. For directory + // buckets in this API operation, only the null value of the version ID is + // supported. + VersionId *string + + noSmithyDocumentSerde +} + +func (in *DeleteObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + +type DeleteObjectOutput struct { + + // Indicates whether the specified object version that was permanently deleted was + // (true) or was not (false) a delete marker before deletion. In a simple DELETE, + // this header indicates whether (true) or not (false) the current version of the + // object is a delete marker. This functionality is not supported for directory + // buckets. + DeleteMarker *bool + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. This functionality is not supported for directory buckets. + VersionId *string + + // Metadata pertaining to the operation's result. 
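+//
+// Illustrative call shape (a sketch with assumed names). As described above,
+// omitting VersionId in a versioning-enabled bucket inserts a delete marker,
+// while supplying it permanently deletes that specific version:
+//
+//	out, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
+//		Bucket:    aws.String("example-bucket"),
+//		Key:       aws.String("path/to/object"),
+//		VersionId: aws.String("example-version-id"), // omit to create a delete marker
+//	})
+//	if err == nil && out.DeleteMarker != nil && *out.DeleteMarker {
+//		// the deleted version was itself a delete marker
+//	}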
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObject{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObject"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteObject", + } +} + +// getDeleteObjectBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getDeleteObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteObjectInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func 
addDeleteObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignDeleteObject is used to generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignDeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &DeleteObjectInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "DeleteObject", params, clientOptFns, + c.client.addOperationDeleteObjectMiddlewares, + presignConverter(options).convertToPresignMiddleware, + addDeleteObjectPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addDeleteObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go new file mode 100644 index 000000000..c5f31dec6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go @@ -0,0 +1,228 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Removes the entire tag +// set from the specified object. For more information about managing object tags, +// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) +// . To use this operation, you must have permission to perform the +// s3:DeleteObjectTagging action. To delete tags of a specific object version, add +// the versionId query parameter in the request. You will need permission for the +// s3:DeleteObjectVersionTagging action. 
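+//
+// For illustration, a minimal sketch of removing the tag set from one object
+// version (hypothetical names; client built as in the DeleteObject sketch):
+//
+//	out, err := client.DeleteObjectTagging(ctx, &s3.DeleteObjectTaggingInput{
+//		Bucket:    aws.String("example-bucket"),     // hypothetical
+//		Key:       aws.String("path/to/object"),     // hypothetical
+//		VersionId: aws.String("example-version-id"), // optional; targets one version
+//	})
+//	// On success, out.VersionId reports the version the tag set was removed from.
+//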
The following operations are related to +// DeleteObjectTagging : +// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) { + if params == nil { + params = &DeleteObjectTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteObjectTagging", params, optFns, c.addOperationDeleteObjectTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteObjectTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteObjectTaggingInput struct { + + // The bucket name containing the objects from which to remove the tags. Access + // points - When you use this action with an access point, you must provide the + // alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + // The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key that identifies the object in the bucket from which to remove all tags. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // The versionId of the object that the tag-set will be removed from. + VersionId *string + + noSmithyDocumentSerde +} + +func (in *DeleteObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type DeleteObjectTaggingOutput struct { + + // The versionId of the object the tag-set was removed from. + VersionId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjectTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjectTagging{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObjectTagging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjectTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteObjectTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeleteObjectTaggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteObjectTagging", + } +} + +// getDeleteObjectTaggingBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteObjectTaggingBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteObjectTaggingInput) + if 
in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteObjectTaggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go new file mode 100644 index 000000000..05f82cf75 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go @@ -0,0 +1,381 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation enables you to delete multiple objects from a bucket using a +// single HTTP request. If you know the object keys that you want to delete, then +// this operation provides a suitable alternative to sending individual delete +// requests, reducing per-request overhead. The request can contain a list of up to +// 1000 keys that you want to delete. In the XML, you provide the object key names, +// and optionally, version IDs if you want to delete a specific version of the +// object from a versioning-enabled bucket. For each key, Amazon S3 performs a +// delete operation and returns the result of that delete, success or failure, in +// the response. Note that if the object specified in the request is not found, +// Amazon S3 returns the result as deleted. +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +// Path-style requests are not supported. For more information, see Regional and +// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// The operation supports two modes for the response: verbose and quiet. By +// default, the operation uses verbose mode in which the response includes the +// result of deletion of each key in your request. In quiet mode the response +// includes only keys where the delete operation encountered an error. For a +// successful deletion in a quiet mode, the operation does not return any +// information about the delete in the response body. When performing this action +// on an MFA Delete enabled bucket, that attempts to delete any versioned objects, +// you must include an MFA token. 
If you do not provide one, the entire request +// will fail, even if there are non-versioned objects you are trying to delete. If +// you provide an invalid token, whether there are versioned keys in the request or +// not, the entire Multi-Object Delete request will fail. For information about MFA +// Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) +// in the Amazon S3 User Guide. Directory buckets - MFA delete is not supported by +// directory buckets. Permissions +// - General purpose bucket permissions - The following permissions are required +// in your policies when your DeleteObjects request includes specific headers. +// - s3:DeleteObject - To delete an object from a bucket, you must always specify +// the s3:DeleteObject permission. +// - s3:DeleteObjectVersion - To delete a specific version of an object from a +// versioning-enabled bucket, you must specify the s3:DeleteObjectVersion +// permission. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Content-MD5 request header +// - General purpose bucket - The Content-MD5 request header is required for all +// Multi-Object Delete requests. Amazon S3 uses the header value to ensure that +// your request body has not been altered in transit. +// - Directory bucket - The Content-MD5 request header or an additional checksum +// request header (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , +// x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is required for all +// Multi-Object Delete requests. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
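+//
+// A minimal batch-delete sketch (hypothetical bucket and keys; client and
+// imports as in the earlier sketches, plus
+// github.com/aws/aws-sdk-go-v2/service/s3/types). The checksum middleware wired
+// in below with RequireChecksum set supplies the required Content-MD5/checksum
+// header automatically:
+//
+//	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		Delete: &types.Delete{
+//			Objects: []types.ObjectIdentifier{
+//				{Key: aws.String("a.txt")}, // hypothetical keys
+//				{Key: aws.String("b.txt")},
+//			},
+//			Quiet: aws.Bool(true), // quiet mode: only failures are reported
+//		},
+//	})
+//	// On success, inspect out.Deleted and out.Errors per key.
+//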
The following operations are +// related to DeleteObjects : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) { + if params == nil { + params = &DeleteObjectsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteObjects", params, optFns, c.addOperationDeleteObjectsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteObjectsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteObjectsInput struct { + + // The bucket name containing the objects to delete. Directory buckets - When you + // use this operation with a directory bucket, you must use virtual-hosted-style + // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . + // Path-style requests are not supported. Directory bucket names must be unique in + // the chosen Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Container for the request. + // + // This member is required. + Delete *types.Delete + + // Specifies whether you want to delete this object even if it has a + // Governance-type Object Lock in place. To use this header, you must have the + // s3:BypassGovernanceRetention permission. 
This functionality is not supported for + // directory buckets. + BypassGovernanceRetention *bool + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 + // fails the request with the HTTP status code 400 Bad Request . For the + // x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // - CRC32 + // - CRC32C + // - SHA1 + // - SHA256 + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If the individual checksum value you provide + // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set + // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided + // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the + // provided value in x-amz-checksum-algorithm . If you provide an individual + // checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // The concatenation of the authentication device's serial number, a space, and + // the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. When performing the DeleteObjects operation on an MFA delete + // enabled bucket, which attempts to delete the specified versioned objects, you + // must include an MFA token. If you don't provide an MFA token, the entire request + // will fail, even if there are non-versioned objects that you are trying to + // delete. If you provide an invalid token, whether there are versioned object keys + // in the request or not, the entire Multi-Object Delete request will fail. For + // information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + MFA *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + noSmithyDocumentSerde +} + +func (in *DeleteObjectsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type DeleteObjectsOutput struct { + + // Container element for a successful delete. It identifies the object that was + // successfully deleted. 
+ Deleted []types.DeletedObject + + // Container for a failed delete action that describes the object that Amazon S3 + // attempted to delete and the error it encountered. + Errors []types.Error + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjects{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjects{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObjects"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjects(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteObjectsInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addDeleteObjectsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *DeleteObjectsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return 
*v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteObjects(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteObjects", + } +} + +// getDeleteObjectsRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. +func getDeleteObjectsRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*DeleteObjectsInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addDeleteObjectsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getDeleteObjectsRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getDeleteObjectsBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getDeleteObjectsBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteObjectsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteObjectsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteObjectsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go new file mode 100644 index 000000000..43969e2b1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go @@ -0,0 +1,202 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Removes the +// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, +// you must have the s3:PutBucketPublicAccessBlock permission. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . 
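+//
+// A minimal sketch (hypothetical bucket and account ID; client as in the
+// earlier sketches):
+//
+//	_, err := client.DeletePublicAccessBlock(ctx, &s3.DeletePublicAccessBlockInput{
+//		Bucket:              aws.String("example-bucket"), // hypothetical
+//		ExpectedBucketOwner: aws.String("111122223333"),   // optional ownership guard
+//	})
+//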
The following operations are related to DeletePublicAccessBlock : +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) { + if params == nil { + params = &DeletePublicAccessBlockInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeletePublicAccessBlock", params, optFns, c.addOperationDeletePublicAccessBlockMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeletePublicAccessBlockOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeletePublicAccessBlockInput struct { + + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeletePublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeletePublicAccessBlockOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeletePublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeletePublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeletePublicAccessBlock"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = 
addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeletePublicAccessBlock(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeletePublicAccessBlockUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *DeletePublicAccessBlockInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeletePublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeletePublicAccessBlock", + } +} + +// getDeletePublicAccessBlockBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeletePublicAccessBlockBucketMember(input interface{}) (*string, bool) { + in := input.(*DeletePublicAccessBlockInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeletePublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeletePublicAccessBlockBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go new file mode 100644 index 000000000..4bb1ff71c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go @@ -0,0 +1,230 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. 
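+//
+// A minimal sketch of reading the Transfer Acceleration state (hypothetical
+// bucket; client and types import as in the earlier sketches). As described
+// below, Status is empty for a bucket on which acceleration has never been
+// configured:
+//
+//	out, err := client.GetBucketAccelerateConfiguration(ctx,
+//		&s3.GetBucketAccelerateConfigurationInput{
+//			Bucket: aws.String("example-bucket"), // hypothetical
+//		})
+//	if err == nil {
+//		switch out.Status {
+//		case types.BucketAccelerateStatusEnabled, types.BucketAccelerateStatusSuspended:
+//			// the state has been set at least once
+//		default:
+//			// empty: acceleration never configured on this bucket
+//		}
+//	}
+//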
This implementation of +// the GET action uses the accelerate subresource to return the Transfer +// Acceleration state of a bucket, which is either Enabled or Suspended . Amazon S3 +// Transfer Acceleration is a bucket-level feature that enables you to perform +// faster data transfers to and from Amazon S3. To use this operation, you must +// have permission to perform the s3:GetAccelerateConfiguration action. The bucket +// owner has this permission by default. The bucket owner can grant this permission +// to others. For more information about permissions, see Permissions Related to +// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. You set the Transfer Acceleration state of an +// existing bucket to Enabled or Suspended by using the +// PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// operation. A GET accelerate request does not return a state value for a bucket +// that has no transfer acceleration state. A bucket has no Transfer Acceleration +// state if a state has never been set on the bucket. For more information about +// transfer acceleration, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon S3 User Guide. The following operations are related to +// GetBucketAccelerateConfiguration : +// - PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) { + if params == nil { + params = &GetBucketAccelerateConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketAccelerateConfiguration", params, optFns, c.addOperationGetBucketAccelerateConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketAccelerateConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketAccelerateConfigurationInput struct { + + // The name of the bucket for which the accelerate configuration is retrieved. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. 
+ RequestPayer types.RequestPayer + + noSmithyDocumentSerde +} + +func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketAccelerateConfigurationOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // The accelerate configuration of the bucket. + Status types.BucketAccelerateStatus + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAccelerateConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAccelerateConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAccelerateConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + 
return nil +} + +func (v *GetBucketAccelerateConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketAccelerateConfiguration", + } +} + +// getGetBucketAccelerateConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getGetBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketAccelerateConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketAccelerateConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go new file mode 100644 index 000000000..bc7c4ea18 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go @@ -0,0 +1,226 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. This implementation of +// the GET action uses the acl subresource to return the access control list (ACL) +// of a bucket. To use GET to return the ACL of the bucket, you must have the +// READ_ACP access to the bucket. If READ_ACP permission is granted to the +// anonymous user, you can return the ACL of the bucket without using an +// authorization header. When you use this API operation with an access point, +// provide the alias of the access point in place of the bucket name. When you use +// this API operation with an Object Lambda access point, provide the alias of the +// Object Lambda access point in place of the bucket name. If the Object Lambda +// access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) +// . If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// requests to read ACLs are still supported and return the +// bucket-owner-full-control ACL with the owner being the account that created the +// bucket. 
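+//
+// A minimal sketch of listing a bucket's grants (hypothetical bucket; client as
+// in the earlier sketches):
+//
+//	out, err := client.GetBucketAcl(ctx, &s3.GetBucketAclInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//	})
+//	if err == nil {
+//		for _, g := range out.Grants {
+//			_ = g // g.Grantee identifies who holds g.Permission (e.g. FULL_CONTROL)
+//		}
+//	}
+//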
For more information, see Controlling object ownership and disabling +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. The following operations are related to +// GetBucketAcl : +// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) { + if params == nil { + params = &GetBucketAclInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketAcl", params, optFns, c.addOperationGetBucketAclMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketAclOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketAclInput struct { + + // Specifies the S3 bucket whose ACL is being requested. When you use this API + // operation with an access point, provide the alias of the access point in place + // of the bucket name. When you use this API operation with an Object Lambda access + // point, provide the alias of the Object Lambda access point in place of the + // bucket name. If the Object Lambda access point alias in a request is not valid, + // the error code InvalidAccessPointAliasError is returned. For more information + // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketAclOutput struct { + + // A list of grants. + Grants []types.Grant + + // Container for the bucket owner's display name and ID. + Owner *types.Owner + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAcl{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAcl{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAcl"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketAclValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAcl(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketAclUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketAclInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketAcl", + } +} + +// getGetBucketAclBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetBucketAclBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketAclInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func 
addGetBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketAclBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go new file mode 100644 index 000000000..64e41d403 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go @@ -0,0 +1,216 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. This implementation of +// the GET action returns an analytics configuration (identified by the analytics +// configuration ID) from the bucket. To use this operation, you must have +// permissions to perform the s3:GetAnalyticsConfiguration action. The bucket +// owner has this permission by default. The bucket owner can grant this permission +// to others. For more information about permissions, see Permissions Related to +// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. For information about Amazon S3 analytics feature, +// see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon S3 User Guide. 
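+//
+// A minimal sketch (hypothetical bucket and configuration ID; client as in the
+// earlier sketches). Both Bucket and Id are required members:
+//
+//	out, err := client.GetBucketAnalyticsConfiguration(ctx,
+//		&s3.GetBucketAnalyticsConfigurationInput{
+//			Bucket: aws.String("example-bucket"), // hypothetical
+//			Id:     aws.String("example-config"), // hypothetical configuration ID
+//		})
+//	if err == nil && out.AnalyticsConfiguration != nil {
+//		_ = out.AnalyticsConfiguration // filter and storage class analysis details
+//	}
+//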
The following operations are related to +// GetBucketAnalyticsConfiguration : +// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) { + if params == nil { + params = &GetBucketAnalyticsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketAnalyticsConfiguration", params, optFns, c.addOperationGetBucketAnalyticsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketAnalyticsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketAnalyticsConfigurationInput struct { + + // The name of the bucket from which an analytics configuration is retrieved. + // + // This member is required. + Bucket *string + + // The ID that identifies the analytics configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketAnalyticsConfigurationOutput struct { + + // The configuration and any analyses for the analytics filter. + AnalyticsConfiguration *types.AnalyticsConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAnalyticsConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketAnalyticsConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketAnalyticsConfiguration", + } +} + +// getGetBucketAnalyticsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, 
+func getGetBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketAnalyticsConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketAnalyticsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go new file mode 100644 index 000000000..0997225eb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go @@ -0,0 +1,219 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns the Cross-Origin +// Resource Sharing (CORS) configuration information set for the bucket. To use +// this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it to +// others. When you use this API operation with an access point, provide the alias +// of the access point in place of the bucket name. When you use this API operation +// with an Object Lambda access point, provide the alias of the Object Lambda +// access point in place of the bucket name. If the Object Lambda access point +// alias in a request is not valid, the error code InvalidAccessPointAliasError is +// returned. For more information about InvalidAccessPointAliasError , see List of +// Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) +// . For more information about CORS, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// . The following operations are related to GetBucketCors : +// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) { + if params == nil { + params = &GetBucketCorsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketCors", params, optFns, c.addOperationGetBucketCorsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketCorsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketCorsInput struct { + + // The bucket name for which to get the cors configuration. 
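A sketch of the call shape for GetBucketCors, assuming a *s3.Client built as in the previous sketch; the package and helper names are hypothetical.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printBucketCORS prints each CORS rule configured on the bucket. A bucket
// with no CORS configuration yields an error (NoSuchCORSConfiguration), not
// an empty rule list.
func printBucketCORS(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketCors(ctx, &s3.GetBucketCorsInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return fmt.Errorf("get bucket cors: %w", err)
	}
	for i, rule := range out.CORSRules {
		fmt.Printf("rule %d: origins=%v methods=%v\n", i, rule.AllowedOrigins, rule.AllowedMethods)
	}
	return nil
}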
When you use this API + // operation with an access point, provide the alias of the access point in place + // of the bucket name. When you use this API operation with an Object Lambda access + // point, provide the alias of the Object Lambda access point in place of the + // bucket name. If the Object Lambda access point alias in a request is not valid, + // the error code InvalidAccessPointAliasError is returned. For more information + // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketCorsOutput struct { + + // A set of origins and methods (cross-origin access that you want to allow). You + // can add up to 100 rules to the configuration. + CORSRules []types.CORSRule + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketCors{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketCors{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketCors"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketCors(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = 
addGetBucketCorsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketCorsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketCors", + } +} + +// getGetBucketCorsBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetBucketCorsBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketCorsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketCorsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go new file mode 100644 index 000000000..22c1f9bb5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go @@ -0,0 +1,211 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns the default +// encryption configuration for an Amazon S3 bucket. By default, all buckets have a +// default encryption configuration that uses server-side encryption with Amazon S3 +// managed keys (SSE-S3). For information about the bucket default encryption +// feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon S3 User Guide. To use this operation, you must have permission to +// perform the s3:GetEncryptionConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. 
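A hedged sketch of GetBucketEncryption under the same assumptions (configured client, hypothetical helper name). Because every bucket now carries a default encryption configuration (SSE-S3 at minimum), a successful response should include at least one rule.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// defaultEncryption prints the default server-side encryption algorithm for
// each rule on the bucket.
func defaultEncryption(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketEncryption(ctx, &s3.GetBucketEncryptionInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return fmt.Errorf("get bucket encryption: %w", err)
	}
	if cfg := out.ServerSideEncryptionConfiguration; cfg != nil {
		for _, rule := range cfg.Rules {
			if def := rule.ApplyServerSideEncryptionByDefault; def != nil {
				fmt.Println("default SSE:", def.SSEAlgorithm) // e.g. AES256 or aws:kms
			}
		}
	}
	return nil
}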
For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . The following operations are related to GetBucketEncryption : +// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) { + if params == nil { + params = &GetBucketEncryptionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketEncryption", params, optFns, c.addOperationGetBucketEncryptionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketEncryptionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketEncryptionInput struct { + + // The name of the bucket from which the server-side encryption configuration is + // retrieved. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketEncryptionOutput struct { + + // Specifies the default server-side-encryption configuration. + ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketEncryption{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketEncryption{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketEncryption"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketEncryption(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketEncryptionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketEncryptionInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketEncryption", + } +} + +// getGetBucketEncryptionBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetBucketEncryptionBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketEncryptionInput) + if 
in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketEncryptionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go new file mode 100644 index 000000000..f3ae88a9b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go @@ -0,0 +1,216 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Gets the S3 +// Intelligent-Tiering configuration from the specified bucket. The S3 +// Intelligent-Tiering storage class is designed to optimize storage costs by +// automatically moving data to the most cost-effective storage access tier, +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput access +// tiers. To get the lowest storage cost on data that can be accessed in minutes to +// hours, you can choose to activate additional archiving capabilities. The S3 +// Intelligent-Tiering storage class is the ideal storage class for data with +// unknown, changing, or unpredictable access patterns, independent of object size +// or retention period. If the size of an object is less than 128 KB, it is not +// monitored and not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. For more information, see Storage class for +// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) +// . 
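A sketch of GetBucketIntelligentTieringConfiguration with the same assumed client; note that, unlike most operations in this diff, the input type below has no ExpectedBucketOwner member.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// intelligentTieringStatus prints the ID and status of one Intelligent-Tiering
// configuration; bucket and id values are supplied by the caller.
func intelligentTieringStatus(ctx context.Context, client *s3.Client, bucket, id string) error {
	out, err := client.GetBucketIntelligentTieringConfiguration(ctx,
		&s3.GetBucketIntelligentTieringConfigurationInput{
			Bucket: aws.String(bucket),
			Id:     aws.String(id),
		})
	if err != nil {
		return fmt.Errorf("get intelligent-tiering configuration: %w", err)
	}
	if c := out.IntelligentTieringConfiguration; c != nil {
		fmt.Printf("%s: %s\n", aws.ToString(c.Id), c.Status) // Status is Enabled or Disabled
	}
	return nil
}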
Operations related to GetBucketIntelligentTieringConfiguration include: +// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +func (c *Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *GetBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*GetBucketIntelligentTieringConfigurationOutput, error) { + if params == nil { + params = &GetBucketIntelligentTieringConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketIntelligentTieringConfiguration", params, optFns, c.addOperationGetBucketIntelligentTieringConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketIntelligentTieringConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketIntelligentTieringConfigurationInput struct { + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // This member is required. + Bucket *string + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // This member is required. + Id *string + + noSmithyDocumentSerde +} + +func (in *GetBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketIntelligentTieringConfigurationOutput struct { + + // Container for S3 Intelligent-Tiering configuration. + IntelligentTieringConfiguration *types.IntelligentTieringConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketIntelligentTieringConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketIntelligentTieringConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketIntelligentTieringConfiguration", + } +} + +// getGetBucketIntelligentTieringConfigurationBucketMember returns a pointer to +// string denoting 
a provided bucket member valueand a boolean indicating if the +// input has a modeled bucket name, +func getGetBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketIntelligentTieringConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketIntelligentTieringConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go new file mode 100644 index 000000000..123218e97 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go @@ -0,0 +1,213 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns an inventory +// configuration (identified by the inventory configuration ID) from the bucket. To +// use this operation, you must have permissions to perform the +// s3:GetInventoryConfiguration action. The bucket owner has this permission by +// default and can grant this permission to others. For more information about +// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// . 
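A sketch of GetBucketInventoryConfiguration, again assuming a configured *s3.Client; the helper name is hypothetical.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// inventoryEnabled reports whether the named inventory configuration is
// currently generating inventory lists.
func inventoryEnabled(ctx context.Context, client *s3.Client, bucket, id string) (bool, error) {
	out, err := client.GetBucketInventoryConfiguration(ctx, &s3.GetBucketInventoryConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return false, fmt.Errorf("get inventory configuration: %w", err)
	}
	inv := out.InventoryConfiguration
	// IsEnabled is a *bool on types.InventoryConfiguration.
	return inv != nil && aws.ToBool(inv.IsEnabled), nil
}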
The following operations are related to GetBucketInventoryConfiguration : +// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) { + if params == nil { + params = &GetBucketInventoryConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketInventoryConfiguration", params, optFns, c.addOperationGetBucketInventoryConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketInventoryConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketInventoryConfigurationInput struct { + + // The name of the bucket containing the inventory configuration to retrieve. + // + // This member is required. + Bucket *string + + // The ID used to identify the inventory configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketInventoryConfigurationOutput struct { + + // Specifies the inventory configuration. + InventoryConfiguration *types.InventoryConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketInventoryConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketInventoryConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketInventoryConfiguration", + } +} + +// getGetBucketInventoryConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, 
+func getGetBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketInventoryConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketInventoryConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go new file mode 100644 index 000000000..ddcbbe096 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go @@ -0,0 +1,225 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Bucket lifecycle +// configuration now supports specifying a lifecycle rule using an object key name +// prefix, one or more object tags, object size, or any combination of these. +// Accordingly, this section describes the latest API. The previous version of the +// API supported filtering based only on an object key name prefix, which is +// supported for backward compatibility. For the related API description, see +// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) +// . Accordingly, this section describes the latest API. The response describes the +// new filter element that you can use to specify a filter to select a subset of +// objects to which the rule applies. If you are using a previous version of the +// lifecycle configuration, it still works. For the earlier action, Returns the +// lifecycle configuration information set on the bucket. For information about +// lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// . To use this operation, you must have permission to perform the +// s3:GetLifecycleConfiguration action. The bucket owner has this permission, by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . 
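A sketch of GetBucketLifecycleConfiguration under the same assumptions. The NoSuchLifecycleConfiguration special error called out in the doc comment below surfaces as a Go error rather than an empty rule list.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printLifecycleRules lists each lifecycle rule's ID and status.
func printLifecycleRules(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketLifecycleConfiguration(ctx, &s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		// Includes NoSuchLifecycleConfiguration when the bucket has no configuration.
		return fmt.Errorf("get lifecycle configuration: %w", err)
	}
	for _, rule := range out.Rules {
		fmt.Printf("%s: %s\n", aws.ToString(rule.ID), rule.Status) // Status is Enabled or Disabled
	}
	return nil
}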
GetBucketLifecycleConfiguration has the following special error: +// - Error code: NoSuchLifecycleConfiguration +// - Description: The lifecycle configuration does not exist. +// - HTTP Status Code: 404 Not Found +// - SOAP Fault Code Prefix: Client +// +// The following operations are related to GetBucketLifecycleConfiguration : +// - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) +// - PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) { + if params == nil { + params = &GetBucketLifecycleConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketLifecycleConfiguration", params, optFns, c.addOperationGetBucketLifecycleConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketLifecycleConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketLifecycleConfigurationInput struct { + + // The name of the bucket for which to get the lifecycle information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketLifecycleConfigurationOutput struct { + + // Container for a lifecycle rule. + Rules []types.LifecycleRule + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLifecycleConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLifecycleConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLifecycleConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketLifecycleConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketLifecycleConfiguration", + } +} + +// getGetBucketLifecycleConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, 
+func getGetBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketLifecycleConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketLifecycleConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go new file mode 100644 index 000000000..aff5f3cd5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go @@ -0,0 +1,287 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" +) + +// This operation is not supported by directory buckets. Returns the Region the +// bucket resides in. You set the bucket's Region using the LocationConstraint +// request parameter in a CreateBucket request. For more information, see +// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// . When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. When you use this API operation with +// an Object Lambda access point, provide the alias of the Object Lambda access +// point in place of the bucket name. If the Object Lambda access point alias in a +// request is not valid, the error code InvalidAccessPointAliasError is returned. +// For more information about InvalidAccessPointAliasError , see List of Error +// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) +// . We recommend that you use HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) +// to return the Region that a bucket resides in. For backward compatibility, +// Amazon S3 continues to support GetBucketLocation. 
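A sketch of GetBucketLocation with the same assumed client. The us-east-1 quirk documented on the output type is the main thing a caller has to handle: that Region reports an empty LocationConstraint. (This file also swaps in a custom XML deserializer via swapDeserializerHelper, shown below, because the response body is an unwrapped element.)

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// bucketRegion resolves the Region a bucket resides in.
func bucketRegion(ctx context.Context, client *s3.Client, bucket string) (string, error) {
	out, err := client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", fmt.Errorf("get bucket location: %w", err)
	}
	// Buckets in us-east-1 report an empty LocationConstraint ("null" in the
	// API docs), so map it back to the region name.
	if out.LocationConstraint == "" {
		return "us-east-1", nil
	}
	return string(out.LocationConstraint), nil
}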
The following operations are +// related to GetBucketLocation : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) { + if params == nil { + params = &GetBucketLocationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketLocation", params, optFns, c.addOperationGetBucketLocationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketLocationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketLocationInput struct { + + // The name of the bucket for which to get the location. When you use this API + // operation with an access point, provide the alias of the access point in place + // of the bucket name. When you use this API operation with an Object Lambda access + // point, provide the alias of the Object Lambda access point in place of the + // bucket name. If the Object Lambda access point alias in a request is not valid, + // the error code InvalidAccessPointAliasError is returned. For more information + // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketLocationOutput struct { + + // Specifies the Region where the bucket resides. For a list of all the Amazon S3 + // supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // . Buckets in Region us-east-1 have a LocationConstraint of null . + LocationConstraint types.BucketLocationConstraint + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLocation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLocation{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLocation"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapDeserializerHelper(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLocation(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketLocationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +type awsRestxml_deserializeOpGetBucketLocation_custom struct { +} + +func (*awsRestxml_deserializeOpGetBucketLocation_custom) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketLocation_custom) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata) + } + output := &GetBucketLocationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + decoder := smithyxml.WrapNodeDecoder(rootDecoder, xml.StartElement{}) + err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder) + if err == io.EOF { + err = nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +// Helper to swap in a custom deserializer +func swapDeserializerHelper(stack *middleware.Stack) error { + _, err := stack.Deserialize.Swap("OperationDeserializer", &awsRestxml_deserializeOpGetBucketLocation_custom{}) + if err != nil { + return err + } + return nil +} + +func (v *GetBucketLocationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketLocation(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketLocation", + } +} + +// getGetBucketLocationBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetBucketLocationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketLocationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketLocationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketLocationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go new file mode 100644 index 000000000..d1c4f8fbb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go @@ -0,0 +1,203 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns the logging +// status of a bucket and the permissions users have to view and modify that +// status. 
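Finally, a sketch of GetBucketLogging under the same assumptions; a nil LoggingEnabled in the output means server access logging is simply off for the bucket.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printLoggingTarget prints where a bucket delivers its access logs, if anywhere.
func printLoggingTarget(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketLogging(ctx, &s3.GetBucketLoggingInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return fmt.Errorf("get bucket logging: %w", err)
	}
	if le := out.LoggingEnabled; le != nil {
		fmt.Printf("access logs delivered to s3://%s/%s\n",
			aws.ToString(le.TargetBucket), aws.ToString(le.TargetPrefix))
	} else {
		fmt.Println("server access logging is disabled")
	}
	return nil
}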
The following operations are related to GetBucketLogging : +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) { + if params == nil { + params = &GetBucketLoggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketLogging", params, optFns, c.addOperationGetBucketLoggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketLoggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketLoggingInput struct { + + // The bucket name for which to get the logging information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketLoggingOutput struct { + + // Describes where logs are stored and the prefix that Amazon S3 assigns to all + // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon S3 API Reference. + LoggingEnabled *types.LoggingEnabled + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLogging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLogging{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLogging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if 
err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLogging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketLoggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketLoggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketLogging", + } +} + +// getGetBucketLoggingBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetBucketLoggingBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketLoggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketLoggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go new file mode 100644 index 000000000..d7499c68c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go @@ -0,0 +1,217 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Gets a metrics +// configuration (specified by the metrics configuration ID) from the bucket. Note +// that this doesn't include the daily storage metrics. To use this operation, you +// must have permissions to perform the s3:GetMetricsConfiguration action. 
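(Editor's aside — illustration only, not part of the vendored diff.) Every addOperation*Middlewares function in these files assembles the same smithy-go stack — serialize, resolve endpoint, sign, retry, deserialize — differing only in the operation-specific serializer, validator, and bucket accessor. Consumers never call these functions directly; the supported hook is Options.APIOptions, roughly as sketched below. The middleware ID "editorTrace" and the log line are hypothetical.

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go/middleware"
)

// newTracingClient returns an S3 client whose operations log their input
// type before the generated middlewares run.
func newTracingClient(ctx context.Context) (*s3.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	trace := middleware.InitializeMiddlewareFunc("editorTrace",
		func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
		) (middleware.InitializeOutput, middleware.Metadata, error) {
			fmt.Printf("S3 call with input %T\n", in.Parameters)
			return next.HandleInitialize(ctx, in)
		})
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		// Each per-operation function runs once per call, so the hook is
		// applied to every operation's stack, including those in this diff.
		o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
			return stack.Initialize.Add(trace, middleware.Before)
		})
	}), nil
}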
The +// bucket owner has this permission by default. The bucket owner can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about CloudWatch request metrics for Amazon S3, see +// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// . The following operations are related to GetBucketMetricsConfiguration : +// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) { + if params == nil { + params = &GetBucketMetricsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketMetricsConfiguration", params, optFns, c.addOperationGetBucketMetricsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketMetricsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketMetricsConfigurationInput struct { + + // The name of the bucket containing the metrics configuration to retrieve. + // + // This member is required. + Bucket *string + + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketMetricsConfigurationOutput struct { + + // Specifies the metrics configuration. + MetricsConfiguration *types.MetricsConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketMetricsConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketMetricsConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketMetricsConfiguration", + } +} + +// getGetBucketMetricsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func 
getGetBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketMetricsConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketMetricsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go new file mode 100644 index 000000000..73155110d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go @@ -0,0 +1,236 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns the notification +// configuration of a bucket. If notifications are not enabled on the bucket, the +// action returns an empty NotificationConfiguration element. By default, you must +// be the bucket owner to read the notification configuration of a bucket. However, +// the bucket owner can use a bucket policy to grant permission to other users to +// read this configuration with the s3:GetBucketNotification permission. When you +// use this API operation with an access point, provide the alias of the access +// point in place of the bucket name. When you use this API operation with an +// Object Lambda access point, provide the alias of the Object Lambda access point +// in place of the bucket name. If the Object Lambda access point alias in a +// request is not valid, the error code InvalidAccessPointAliasError is returned. +// For more information about InvalidAccessPointAliasError , see List of Error +// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) +// . For more information about setting and reading the notification configuration +// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// . For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// . 
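(Editor's aside — illustration only, not part of the vendored diff.) GetBucketMetricsConfiguration is one of the few operations here with two required members, Bucket and Id. A minimal sketch, with the same imports as the earlier sketches plus fmt; the bucket name and the "EntireBucket" configuration ID are placeholders.

// printMetricsConfigurationID fetches one named metrics configuration.
func printMetricsConfigurationID(ctx context.Context, client *s3.Client) error {
	out, err := client.GetBucketMetricsConfiguration(ctx, &s3.GetBucketMetricsConfigurationInput{
		Bucket: aws.String("my-bucket"),    // placeholder bucket
		Id:     aws.String("EntireBucket"), // placeholder configuration ID
	})
	if err != nil {
		return err
	}
	if out.MetricsConfiguration != nil {
		fmt.Println("metrics configuration:", aws.ToString(out.MetricsConfiguration.Id))
	}
	return nil
}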
The following action is related to GetBucketNotification : +// - PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) { + if params == nil { + params = &GetBucketNotificationConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketNotificationConfiguration", params, optFns, c.addOperationGetBucketNotificationConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketNotificationConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketNotificationConfigurationInput struct { + + // The name of the bucket for which to get the notification configuration. When + // you use this API operation with an access point, provide the alias of the access + // point in place of the bucket name. When you use this API operation with an + // Object Lambda access point, provide the alias of the Object Lambda access point + // in place of the bucket name. If the Object Lambda access point alias in a + // request is not valid, the error code InvalidAccessPointAliasError is returned. + // For more information about InvalidAccessPointAliasError , see List of Error + // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +// A container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off for the bucket. +type GetBucketNotificationConfigurationOutput struct { + + // Enables delivery of events to Amazon EventBridge. + EventBridgeConfiguration *types.EventBridgeConfiguration + + // Describes the Lambda functions to invoke and the events for which to invoke + // them. + LambdaFunctionConfigurations []types.LambdaFunctionConfiguration + + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. + QueueConfigurations []types.QueueConfiguration + + // The topic to which notifications are sent and the events for which + // notifications are generated. + TopicConfigurations []types.TopicConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketNotificationConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketNotificationConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketNotificationConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketNotificationConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketNotificationConfiguration", + } +} + +// getGetBucketNotificationConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the 
input has +// a modeled bucket name, +func getGetBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketNotificationConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketNotificationConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go new file mode 100644 index 000000000..cea151428 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go @@ -0,0 +1,205 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Retrieves +// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have +// the s3:GetBucketOwnershipControls permission. For more information about Amazon +// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html) +// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// . The following operations are related to GetBucketOwnershipControls : +// - PutBucketOwnershipControls +// - DeleteBucketOwnershipControls +func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) { + if params == nil { + params = &GetBucketOwnershipControlsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketOwnershipControls", params, optFns, c.addOperationGetBucketOwnershipControlsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketOwnershipControlsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketOwnershipControlsInput struct { + + // The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
+ ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketOwnershipControlsOutput struct { + + // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or + // ObjectWriter) currently in effect for this Amazon S3 bucket. + OwnershipControls *types.OwnershipControls + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketOwnershipControls"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketOwnershipControls(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketOwnershipControlsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, 
true +} + +func newServiceMetadataMiddleware_opGetBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketOwnershipControls", + } +} + +// getGetBucketOwnershipControlsBucketMember returns a pointer to string denoting +// a provided bucket member valueand a boolean indicating if the input has a +// modeled bucket name, +func getGetBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketOwnershipControlsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketOwnershipControlsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go new file mode 100644 index 000000000..c2f98f936 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go @@ -0,0 +1,255 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the policy of a specified bucket. Directory buckets - For directory +// buckets, you must make requests for this API operation to the Regional endpoint. +// These endpoints support path-style requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions If you are using an identity other than +// the root user of the Amazon Web Services account that owns the bucket, the +// calling identity must both have the GetBucketPolicy permissions on the +// specified bucket and belong to the bucket owner's account in order to use this +// operation. If you don't have GetBucketPolicy permissions, Amazon S3 returns a +// 403 Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. To ensure that bucket owners don't +// inadvertently lock themselves out of their own buckets, the root principal in a +// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , +// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket +// policy explicitly denies the root principal's access. 
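(Editor's aside — illustration only, not part of the vendored diff.) The notification operation earlier in this diff returns its configuration as three slices, one per destination type; an unconfigured bucket simply yields empty slices rather than an error. A minimal sketch under the same import assumptions as above:

// listNotificationTargets prints every configured notification destination.
func listNotificationTargets(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketNotificationConfiguration(ctx, &s3.GetBucketNotificationConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for _, t := range out.TopicConfigurations {
		fmt.Println("SNS topic:", aws.ToString(t.TopicArn))
	}
	for _, q := range out.QueueConfigurations {
		fmt.Println("SQS queue:", aws.ToString(q.QueueArn))
	}
	for _, l := range out.LambdaFunctionConfigurations {
		fmt.Println("Lambda function:", aws.ToString(l.LambdaFunctionArn))
	}
	return nil
}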
Bucket owner root +// principals can only be blocked from performing these API actions by VPC endpoint +// policies and Amazon Web Services Organizations policies. +// - General purpose bucket permissions - The s3:GetBucketPolicy permission is +// required in a policy. For more information about general purpose buckets bucket +// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:GetBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web Services +// account that owns the resource. For more information about directory bucket +// policies and permissions, see Amazon Web Services Identity and Access +// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// Example bucket policies General purpose buckets example bucket policies - See +// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) +// in the Amazon S3 User Guide. Directory bucket example bucket policies - See +// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The +// HTTP Host header syntax is s3express-control.region.amazonaws.com . The +// following action is related to GetBucketPolicy : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) { + if params == nil { + params = &GetBucketPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicy", params, optFns, c.addOperationGetBucketPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketPolicyInput struct { + + // The bucket name to get the bucket policy for. Directory buckets - When you use + // this operation with a directory bucket, you must use path-style requests in the + // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide Access points - When you use this API operation with + // an access point, provide the alias of the access point in place of the bucket + // name. Object Lambda access points - When you use this API operation with an + // Object Lambda access point, provide the alias of the Object Lambda access point + // in place of the bucket name. 
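(Editor's aside — illustration only, not part of the vendored diff.) For the ownership-controls operation above, the output nests the effective setting inside a list of rules. A minimal sketch under the same import assumptions:

// printObjectOwnership reports the bucket's object-ownership rules.
func printObjectOwnership(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketOwnershipControls(ctx, &s3.GetBucketOwnershipControlsInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	if out.OwnershipControls == nil {
		return nil
	}
	for _, rule := range out.OwnershipControls.Rules {
		// One of BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter.
		fmt.Println("object ownership:", rule.ObjectOwnership)
	}
	return nil
}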
If the Object Lambda access point alias in a + // request is not valid, the error code InvalidAccessPointAliasError is returned. + // For more information about InvalidAccessPointAliasError , see List of Error + // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . Access points and Object Lambda access points are not supported by directory + // buckets. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). For directory buckets, this header + // is not supported in this API operation. If you specify this header, the request + // fails with the HTTP status code 501 Not Implemented . + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketPolicyOutput struct { + + // The bucket policy as a JSON document. + Policy *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicy{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketPolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketPolicyUpdateEndpoint(stack, options); err != nil { + return err + } + if err = 
addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketPolicyInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketPolicy", + } +} + +// getGetBucketPolicyBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetBucketPolicyBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketPolicyInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketPolicyBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go new file mode 100644 index 000000000..cb36ac504 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go @@ -0,0 +1,208 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Retrieves the policy +// status for an Amazon S3 bucket, indicating whether the bucket is public. In +// order to use this operation, you must have the s3:GetBucketPolicyStatus +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// . For more information about when Amazon S3 considers a bucket public, see The +// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// . 
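(Editor's aside — illustration only, not part of the vendored diff.) GetBucketPolicy returns the policy document as a raw JSON string in Policy. A minimal sketch under the same import assumptions; note that a bucket with no attached policy answers with an error (error code NoSuchBucketPolicy) rather than an empty document.

// bucketPolicyJSON fetches the bucket policy as raw JSON.
func bucketPolicyJSON(ctx context.Context, client *s3.Client, bucket string) (string, error) {
	out, err := client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", err // includes the no-policy case (NoSuchBucketPolicy)
	}
	return aws.ToString(out.Policy), nil
}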
The following operations are related to GetBucketPolicyStatus : +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) { + if params == nil { + params = &GetBucketPolicyStatusInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicyStatus", params, optFns, c.addOperationGetBucketPolicyStatusMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketPolicyStatusOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketPolicyStatusInput struct { + + // The name of the Amazon S3 bucket whose policy status you want to retrieve. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketPolicyStatusInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketPolicyStatusOutput struct { + + // The policy status for the specified bucket. + PolicyStatus *types.PolicyStatus + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicyStatus{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicyStatus{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketPolicyStatus"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketPolicyStatusValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicyStatus(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketPolicyStatusUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketPolicyStatusInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketPolicyStatus(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketPolicyStatus", + } +} + +// getGetBucketPolicyStatusBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetBucketPolicyStatusBucketMember(input interface{}) (*string, bool) { + in := 
input.(*GetBucketPolicyStatusInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketPolicyStatusUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketPolicyStatusBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go new file mode 100644 index 000000000..7e44d38ee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go @@ -0,0 +1,212 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns the replication +// configuration of a bucket. It can take a while to propagate the put or delete a +// replication configuration to all Amazon S3 systems. Therefore, a get request +// soon after put or delete can return a wrong result. For information about +// replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 User Guide. This action requires permissions for the +// s3:GetReplicationConfiguration action. For more information about permissions, +// see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// . If you include the Filter element in a replication configuration, you must +// also include the DeleteMarkerReplication and Priority elements. The response +// also returns those elements. 
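(Editor's aside — illustration only, not part of the vendored diff.) The policy-status operation above reduces to a single flag on the output. A minimal sketch under the same import assumptions, assuming IsPublic is the *bool it is in recent SDK releases:

// bucketIsPublic reports whether S3 considers the bucket's policy public.
func bucketIsPublic(ctx context.Context, client *s3.Client, bucket string) (bool, error) {
	out, err := client.GetBucketPolicyStatus(ctx, &s3.GetBucketPolicyStatusInput{
		Bucket: aws.String(bucket),
	})
	if err != nil || out.PolicyStatus == nil {
		return false, err
	}
	return aws.ToBool(out.PolicyStatus.IsPublic), nil
}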
For information about GetBucketReplication errors, +// see List of replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// The following operations are related to GetBucketReplication : +// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) { + if params == nil { + params = &GetBucketReplicationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketReplication", params, optFns, c.addOperationGetBucketReplicationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketReplicationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketReplicationInput struct { + + // The bucket name for which to get the replication information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketReplicationOutput struct { + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + ReplicationConfiguration *types.ReplicationConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketReplication{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketReplication{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketReplication"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketReplicationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketReplication(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketReplicationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketReplicationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketReplication", + } +} + +// getGetBucketReplicationBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetBucketReplicationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketReplicationInput) 
+ if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketReplicationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go new file mode 100644 index 000000000..16cc52822 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go @@ -0,0 +1,201 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns the request +// payment configuration of a bucket. To use this version of the operation, you +// must be the bucket owner. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) +// . The following operations are related to GetBucketRequestPayment : +// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) { + if params == nil { + params = &GetBucketRequestPaymentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketRequestPayment", params, optFns, c.addOperationGetBucketRequestPaymentMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketRequestPaymentOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketRequestPaymentInput struct { + + // The name of the bucket for which to get the payment request configuration + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketRequestPaymentOutput struct { + + // Specifies who pays for the download and request fees. + Payer types.Payer + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketRequestPayment{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketRequestPayment{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketRequestPayment"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketRequestPaymentValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketRequestPayment(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketRequestPaymentUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketRequestPaymentInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketRequestPayment", + } +} + +// getGetBucketRequestPaymentBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetBucketRequestPaymentBucketMember(input interface{}) (*string, bool) { + in 
:= input.(*GetBucketRequestPaymentInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketRequestPaymentBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go new file mode 100644 index 000000000..69a6e4903 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go @@ -0,0 +1,209 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns the tag set +// associated with the bucket. To use this operation, you must have permission to +// perform the s3:GetBucketTagging action. By default, the bucket owner has this +// permission and can grant this permission to others. GetBucketTagging has the +// following special error: +// - Error code: NoSuchTagSet +// - Description: There is no tag set associated with the bucket. +// +// The following operations are related to GetBucketTagging : +// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) { + if params == nil { + params = &GetBucketTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketTagging", params, optFns, c.addOperationGetBucketTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketTaggingInput struct { + + // The name of the bucket for which to get the tagging information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketTaggingOutput struct { + + // Contains the tag set. + // + // This member is required. + TagSet []types.Tag + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketTagging{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketTagging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketTaggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketTagging", + } +} + +// getGetBucketTaggingBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetBucketTaggingBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketTaggingInput) + if in.Bucket == nil { + return nil, false + } + 
return in.Bucket, true +} +func addGetBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketTaggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go new file mode 100644 index 000000000..10540b136 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go @@ -0,0 +1,210 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns the versioning +// state of a bucket. To retrieve the versioning state of a bucket, you must be the +// bucket owner. This implementation also returns the MFA Delete status of the +// versioning state. If the MFA Delete status is enabled , the bucket owner must +// use an authentication device to change the versioning state of the bucket. The +// following operations are related to GetBucketVersioning : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) { + if params == nil { + params = &GetBucketVersioningInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketVersioning", params, optFns, c.addOperationGetBucketVersioningMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketVersioningOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketVersioningInput struct { + + // The name of the bucket for which to get the versioning information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketVersioningOutput struct { + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA delete. 
+ // If the bucket has never been so configured, this element is not returned. + MFADelete types.MFADeleteStatus + + // The versioning state of the bucket. + Status types.BucketVersioningStatus + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketVersioning{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketVersioning{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketVersioning"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketVersioningValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketVersioning(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketVersioningUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketVersioningInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketVersioning", + } +} + +// getGetBucketVersioningBucketMember returns a pointer to 
string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetBucketVersioningBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketVersioningInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketVersioningBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go new file mode 100644 index 000000000..c87f6ff1a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go @@ -0,0 +1,217 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns the website +// configuration for a bucket. To host website on Amazon S3, you can configure a +// bucket as website by adding a website configuration. For more information about +// hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) +// . This GET action requires the S3:GetBucketWebsite permission. By default, only +// the bucket owner can read the bucket website configuration. However, bucket +// owners can allow other users to read the website configuration by writing a +// bucket policy granting them the S3:GetBucketWebsite permission. The following +// operations are related to GetBucketWebsite : +// - DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) +// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) { + if params == nil { + params = &GetBucketWebsiteInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketWebsite", params, optFns, c.addOperationGetBucketWebsiteMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketWebsiteOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketWebsiteInput struct { + + // The bucket name for which to get the website configuration. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
+ ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketWebsiteOutput struct { + + // The object key name of the website error document to use for 4XX class errors. + ErrorDocument *types.ErrorDocument + + // The name of the index document for the website (for example index.html ). + IndexDocument *types.IndexDocument + + // Specifies the redirect behavior of all requests to a website endpoint of an + // Amazon S3 bucket. + RedirectAllRequestsTo *types.RedirectAllRequestsTo + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []types.RoutingRule + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketWebsite{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketWebsite{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketWebsite"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetBucketWebsiteValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketWebsite(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketWebsiteUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = 
addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetBucketWebsiteInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketWebsite", + } +} + +// getGetBucketWebsiteBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetBucketWebsiteBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketWebsiteInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketWebsiteBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go new file mode 100644 index 000000000..a64f5964e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go @@ -0,0 +1,687 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "time" +) + +// Retrieves an object from Amazon S3. In the GetObject request, specify the full +// key name for the object. General purpose buckets - Both the virtual-hosted-style +// requests and the path-style requests are supported. For a virtual hosted-style +// request example, if you have the object photos/2006/February/sample.jpg , +// specify the object key name as /photos/2006/February/sample.jpg . For a +// path-style request example, if you have the object +// photos/2006/February/sample.jpg in the bucket named examplebucket , specify the +// object key name as /examplebucket/photos/2006/February/sample.jpg . For more +// information about request types, see HTTP Host Header Bucket Specification (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) +// in the Amazon S3 User Guide. Directory buckets - Only virtual-hosted-style +// requests are supported. For a virtual hosted-style request example, if you have +// the object photos/2006/February/sample.jpg in the bucket named +// examplebucket--use1-az5--x-s3 , specify the object key name as +// /photos/2006/February/sample.jpg . Also, when you make requests to this API +// operation, your requests are sent to the Zonal endpoint. 
These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - You must have the required permissions +// in a policy. To use GetObject , you must have the READ access to the object +// (or version). If you grant READ access to the anonymous user, the GetObject +// operation returns the object without using an authorization header. For more +// information, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If you include a versionId in your request +// header, you must have the s3:GetObjectVersion permission to access a specific +// version of an object. The s3:GetObject permission is not required in this +// scenario. If you request the current version of an object without a specific +// versionId in the request header, only the s3:GetObject permission is required. +// The s3:GetObjectVersion permission is not required in this scenario. If the +// object that you request doesn’t exist, the error that Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error. +// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Access Denied error. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Storage classes If the object you are retrieving is stored in the S3 Glacier +// Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the +// S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep +// Archive Access tier, before you can retrieve the object you must first restore a +// copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) +// . Otherwise, this operation returns an InvalidObjectState error. For +// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. 
Directory buckets - For directory buckets, only the +// S3 Express One Zone storage class is supported to store newly created objects. +// Unsupported storage class values won't write a destination object and will +// respond with the HTTP status code 400 Bad Request . Encryption Encryption +// request headers, like x-amz-server-side-encryption , should not be sent for the +// GetObject requests, if your object uses server-side encryption with Amazon S3 +// managed encryption keys (SSE-S3), server-side encryption with Key Management +// Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon +// Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject +// requests for the object that uses these types of keys, you’ll get an HTTP 400 +// Bad Request error. Overriding response header values through the request There +// are times when you want to override certain response header values of a +// GetObject response. For example, you might override the Content-Disposition +// response header value through your GetObject request. You can override values +// for a set of response headers. These modified response header values are +// included only in a successful response, that is, when the HTTP status code 200 +// OK is returned. The headers you can override using the following query +// parameters in the request are a subset of the headers that Amazon S3 accepts +// when you create an object. The response headers that you can override for the +// GetObject response are Cache-Control , Content-Disposition , Content-Encoding , +// Content-Language , Content-Type , and Expires . To override values for a set of +// response headers in the GetObject response, you can use the following query +// parameters in the request. +// - response-cache-control +// - response-content-disposition +// - response-content-encoding +// - response-content-language +// - response-content-type +// - response-expires +// +// When you use these parameters, you must sign the request by using either an +// Authorization header or a presigned URL. These parameters cannot be used with an +// unsigned (anonymous) request. HTTP Host header syntax Directory buckets - The +// HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . +// The following operations are related to GetObject : +// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) { + if params == nil { + params = &GetObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObject", params, optFns, c.addOperationGetObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectInput struct { + + // The bucket name containing the object. Directory buckets - When you use this + // operation with a directory bucket, you must use virtual-hosted-style requests in + // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style + // requests are not supported. Directory bucket names must be unique in the chosen + // Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). 
For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Object Lambda access points - When you use this + // action with an Object Lambda access point, you must direct requests to the + // Object Lambda access point hostname. The Object Lambda access point hostname + // takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // Access points and Object Lambda access points are not supported by directory + // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Key of the object to get. + // + // This member is required. + Key *string + + // To retrieve the checksum, this mode must be enabled. + ChecksumMode types.ChecksumMode + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Return the object only if its entity tag (ETag) is the same as the one + // specified in this header; otherwise, return a 412 Precondition Failed error. If + // both of the If-Match and If-Unmodified-Since headers are present in the request + // as follows: If-Match condition evaluates to true , and; If-Unmodified-Since + // condition evaluates to false ; then, S3 returns 200 OK and the data requested. + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) + // . + IfMatch *string + + // Return the object only if it has been modified since the specified time; + // otherwise, return a 304 Not Modified error. If both of the If-None-Match and + // If-Modified-Since headers are present in the request as follows: If-None-Match + // condition evaluates to false , and; If-Modified-Since condition evaluates to + // true ; then, S3 returns 304 Not Modified status code. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . 
+ IfModifiedSince *time.Time + + // Return the object only if its entity tag (ETag) is different from the one + // specified in this header; otherwise, return a 304 Not Modified error. If both + // of the If-None-Match and If-Modified-Since headers are present in the request + // as follows: If-None-Match condition evaluates to false , and; If-Modified-Since + // condition evaluates to true ; then, S3 returns 304 Not Modified HTTP status + // code. For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) + // . + IfNoneMatch *string + + // Return the object only if it has not been modified since the specified time; + // otherwise, return a 412 Precondition Failed error. If both of the If-Match and + // If-Unmodified-Since headers are present in the request as follows: If-Match + // condition evaluates to true , and; If-Unmodified-Since condition evaluates to + // false ; then, S3 returns 200 OK and the data requested. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + IfUnmodifiedSince *time.Time + + // Part number of the object being read. This is a positive integer between 1 and + // 10,000. Effectively performs a 'ranged' GET request for the part specified. + // Useful for downloading just a part of an object. + PartNumber *int32 + + // Downloads the specified byte range of an object. For more information about the + // HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range (https://www.rfc-editor.org/rfc/rfc9110.html#name-range) + // . Amazon S3 doesn't support retrieving multiple ranges of data per GET request. + Range *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Sets the Cache-Control header of the response. + ResponseCacheControl *string + + // Sets the Content-Disposition header of the response. + ResponseContentDisposition *string + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string + + // Sets the Content-Type header of the response. + ResponseContentType *string + + // Sets the Expires header of the response. + ResponseExpires *time.Time + + // Specifies the algorithm to use when decrypting the object (for example, AES256 + // ). 
If you encrypt an object by using server-side encryption with + // customer-provided encryption keys (SSE-C) when you store the object in Amazon + // S3, then when you GET the object, you must use the following headers: + // - x-amz-server-side-encryption-customer-algorithm + // - x-amz-server-side-encryption-customer-key + // - x-amz-server-side-encryption-customer-key-MD5 + // For more information about SSE-C, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key that you originally provided for + // Amazon S3 to encrypt the data before storing it. This value is used to decrypt + // the object when recovering it and must match the one used when storing the data. + // The key must be appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. If you encrypt an object + // by using server-side encryption with customer-provided encryption keys (SSE-C) + // when you store the object in Amazon S3, then when you GET the object, you must + // use the following headers: + // - x-amz-server-side-encryption-customer-algorithm + // - x-amz-server-side-encryption-customer-key + // - x-amz-server-side-encryption-customer-key-MD5 + // For more information about SSE-C, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. Amazon S3 uses this header for a message integrity check + // to ensure that the encryption key was transmitted without error. If you encrypt + // an object by using server-side encryption with customer-provided encryption keys + // (SSE-C) when you store the object in Amazon S3, then when you GET the object, + // you must use the following headers: + // - x-amz-server-side-encryption-customer-algorithm + // - x-amz-server-side-encryption-customer-key + // - x-amz-server-side-encryption-customer-key-MD5 + // For more information about SSE-C, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + SSECustomerKeyMD5 *string + + // Version ID used to reference a specific version of the object. By default, the + // GetObject operation returns the current version of an object. To return a + // different version, use the versionId subresource. + // - If you include a versionId in your request header, you must have the + // s3:GetObjectVersion permission to access a specific version of an object. The + // s3:GetObject permission is not required in this scenario. + // - If you request the current version of an object without a specific versionId + // in the request header, only the s3:GetObject permission is required. The + // s3:GetObjectVersion permission is not required in this scenario. + // - Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. 
For this API operation, only the null value of the version ID is + // supported by directory buckets. You can only specify null to the versionId + // query parameter in the request. + // For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html) + // . + VersionId *string + + noSmithyDocumentSerde +} + +func (in *GetObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + +type GetObjectOutput struct { + + // Indicates that a range of bytes was specified in the request. + AcceptRanges *string + + // Object data. + Body io.ReadCloser + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not + // supported for directory buckets. + BucketKeyEnabled *bool + + // Specifies caching behavior along the request/reply chain. + CacheControl *string + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Specifies presentational information for the object. + ContentDisposition *string + + // Indicates what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // Size of the body in bytes. + ContentLength *int64 + + // The portion of the object returned in the response. + ContentRange *string + + // A standard MIME type describing the format of the object data. + ContentType *string + + // Indicates whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + // - If the current version of the object is a delete marker, Amazon S3 behaves + // as if the object was deleted and includes x-amz-delete-marker: true in the + // response. + // - If the specified version in the request is a delete marker, the response + // returns a 405 Method Not Allowed error and the Last-Modified: timestamp + // response header. 
+ DeleteMarker *bool + + // An entity tag (ETag) is an opaque identifier assigned by a web server to a + // specific version of a resource found at a URL. + ETag *string + + // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) + // ), the response includes this header. It includes the expiry-date and rule-id + // key-value pairs providing object expiration information. The value of the + // rule-id is URL-encoded. This functionality is not supported for directory + // buckets. + Expiration *string + + // The date and time at which the object is no longer cacheable. + Expires *time.Time + + // Date and time when the object was last modified. General purpose buckets - When + // you specify a versionId of the object in your request, if the specified version + // in the request is a delete marker, the response returns a 405 Method Not Allowed + // error and the Last-Modified: timestamp response header. + LastModified *time.Time + + // A map of metadata to store with the object in S3. + // + // Map keys will be normalized to lower-case. + Metadata map[string]string + + // This is set to the number of metadata entries not returned in the headers that + // are prefixed with x-amz-meta- . This can happen if you create metadata using an + // API like SOAP that supports more flexible metadata than the REST API. For + // example, using SOAP, you can create metadata whose values are not legal HTTP + // headers. This functionality is not supported for directory buckets. + MissingMeta *int32 + + // Indicates whether this object has an active legal hold. This field is only + // returned if you have permission to view an object's legal hold status. This + // functionality is not supported for directory buckets. + ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus + + // The Object Lock mode that's currently in place for this object. This + // functionality is not supported for directory buckets. + ObjectLockMode types.ObjectLockMode + + // The date and time when this object's Object Lock will expire. This + // functionality is not supported for directory buckets. + ObjectLockRetainUntilDate *time.Time + + // The count of parts this object has. This value is only returned if you specify + // partNumber in your request and the object was uploaded as a multipart upload. + PartsCount *int32 + + // Amazon S3 can return this if your request involves a bucket that is either a + // source or destination in a replication rule. This functionality is not supported + // for directory buckets. + ReplicationStatus types.ReplicationStatus + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Provides information about object restoration action and expiration time of the + // restored object copy. This functionality is not supported for directory buckets. + // Only the S3 Express One Zone storage class is supported by directory buckets to + // store objects. + Restore *string + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. 
+ SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is + // supported. + ServerSideEncryption types.ServerSideEncryption + + // Provides storage class information of the object. Amazon S3 returns this header + // for all objects except for S3 Standard storage class objects. Directory buckets + // - Only the S3 Express One Zone storage class is supported by directory buckets + // to store objects. + StorageClass types.StorageClass + + // The number of tags, if any, on the object, when you have the relevant + // permission to read object tags. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) + // to retrieve the tag set associated with an object. This functionality is not + // supported for directory buckets. + TagCount *int32 + + // Version ID of the object. This functionality is not supported for directory + // buckets. + VersionId *string + + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. Amazon S3 stores the + // value of this header in the object metadata. This functionality is not supported + // for directory buckets. + WebsiteRedirectLocation *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObject{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObject"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetObjectOutputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addGetObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetObject", + } +} + +// getGetObjectRequestValidationModeMember gets the request checksum validation +// mode provided as input. 
+func getGetObjectRequestValidationModeMember(input interface{}) (string, bool) { + in := input.(*GetObjectInput) + if len(in.ChecksumMode) == 0 { + return "", false + } + return string(in.ChecksumMode), true +} + +func addGetObjectOutputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddOutputMiddleware(stack, internalChecksum.OutputMiddlewareOptions{ + GetValidationMode: getGetObjectRequestValidationModeMember, + ValidationAlgorithms: []string{"CRC32", "CRC32C", "SHA256", "SHA1"}, + IgnoreMultipartValidation: true, + LogValidationSkipped: true, + LogMultipartValidationSkipped: true, + }) +} + +// getGetObjectBucketMember returns a pointer to string denoting a provided bucket +// member valueand a boolean indicating if the input has a modeled bucket name, +func getGetObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignGetObject is used to generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignGetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &GetObjectInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "GetObject", params, clientOptFns, + c.client.addOperationGetObjectMiddlewares, + presignConverter(options).convertToPresignMiddleware, + addGetObjectPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addGetObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go new file mode 100644 index 000000000..fc903cb39 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go @@ -0,0 +1,249 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns the access +// control list (ACL) of an object. 
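For reference alongside the vendored presign plumbing above, the following is a minimal, hypothetical sketch of generating a presigned GetObject URL with this client. It is not part of the vendored file; the bucket name, key, and expiry are placeholders, and credential resolution is assumed to go through the standard config.LoadDefaultConfig path.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Resolve region and credentials from the environment / shared config.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// PresignGetObject signs the request without sending it; the returned
	// URL can be handed to any HTTP client until the expiry elapses.
	presigner := s3.NewPresignClient(s3.NewFromConfig(cfg))
	req, err := presigner.PresignGetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket
		Key:    aws.String("example-key"),    // placeholder key
	}, func(o *s3.PresignOptions) {
		o.Expires = 15 * time.Minute // placeholder expiry
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("presigned GET URL:", req.URL)
}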
To use this operation, you must have +// s3:GetObjectAcl permissions or READ_ACP access to the object. For more +// information, see Mapping of ACL permissions and access policy permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) +// in the Amazon S3 User Guide This functionality is not supported for Amazon S3 on +// Outposts. By default, GET returns ACL information about the current version of +// an object. To return ACL information about a different version, use the +// versionId subresource. If your bucket uses the bucket owner enforced setting for +// S3 Object Ownership, requests to read ACLs are still supported and return the +// bucket-owner-full-control ACL with the owner being the account that created the +// bucket. For more information, see Controlling object ownership and disabling +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. The following operations are related to +// GetObjectAcl : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) { + if params == nil { + params = &GetObjectAclInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectAcl", params, optFns, c.addOperationGetObjectAclMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectAclOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectAclInput struct { + + // The bucket name that contains the object for which to get the ACL information. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key of the object for which to get the ACL information. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Version ID used to reference a specific version of the object. This + // functionality is not supported for directory buckets. + VersionId *string + + noSmithyDocumentSerde +} + +func (in *GetObjectAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + +type GetObjectAclOutput struct { + + // A list of grants. + Grants []types.Grant + + // Container for the bucket owner's display name and ID. + Owner *types.Owner + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAcl{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAcl{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectAcl"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetObjectAclValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAcl(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetObjectAclUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != 
nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetObjectAclInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetObjectAcl", + } +} + +// getGetObjectAclBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetObjectAclBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectAclInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectAclBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go new file mode 100644 index 000000000..dd1b9257c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go @@ -0,0 +1,396 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Retrieves all the metadata from an object without returning the object itself. +// This operation is useful if you're interested only in an object's metadata. +// GetObjectAttributes combines the functionality of HeadObject and ListParts . All +// of the data returned with each of those individual calls can be returned with a +// single call to GetObjectAttributes . Directory buckets - For directory buckets, +// you must make requests for this API operation to the Zonal endpoint. These +// endpoints support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - To use GetObjectAttributes , you must +// have READ access to the object. The permissions that you need to use this +// operation with depend on whether the bucket is versioned. 
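As a usage note outside the generated source: a compact sketch of reading an object's ACL with the GetObjectAcl operation vendored above. The s3util package name, bucket, and key are hypothetical, and client is assumed to be an *s3.Client constructed elsewhere.

package s3util

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printObjectACL lists the owner and grants on a single object.
func printObjectACL(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.GetObjectAcl(ctx, &s3.GetObjectAclInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	if out.Owner != nil && out.Owner.ID != nil {
		fmt.Println("owner:", *out.Owner.ID)
	}
	for _, grant := range out.Grants {
		// Permission is a string-backed enum (FULL_CONTROL, READ, ...).
		fmt.Println("grant:", grant.Permission)
	}
	return nil
}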
If the bucket is +// versioned, you need both the s3:GetObjectVersion and +// s3:GetObjectVersionAttributes permissions for this operation. If the bucket is +// not versioned, you need the s3:GetObject and s3:GetObjectAttributes +// permissions. For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If the object that you request does not exist, the +// error Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found ("no such key") error. +// - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden ("access denied") error. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Encryption Encryption request headers, like x-amz-server-side-encryption , +// should not be sent for HEAD requests if your object uses server-side encryption +// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side +// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side +// encryption with Amazon S3 managed encryption keys (SSE-S3). The +// x-amz-server-side-encryption header is used when you PUT an object to S3 and +// want to specify the encryption method. If you include this header in a GET +// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad +// Request error. It's because the encryption method can't be changed when you +// retrieve the object. If you encrypt an object by using server-side encryption +// with customer-provided encryption keys (SSE-C) when you store the object in +// Amazon S3, then when you retrieve the metadata from the object, you must use the +// following headers to provide the encryption key for the server to be able to +// retrieve the object's metadata. The headers are: +// - x-amz-server-side-encryption-customer-algorithm +// - x-amz-server-side-encryption-customer-key +// - x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using +// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. Directory bucket permissions - For directory +// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( +// AES256 ) is supported. 
Versioning Directory buckets - S3 Versioning isn't +// enabled and supported for directory buckets. For this API operation, only the +// null value of the version ID is supported by directory buckets. You can only +// specify null to the versionId query parameter in the request. Conditional +// request headers Consider the following when using request headers: +// - If both of the If-Match and If-Unmodified-Since headers are present in the +// request as follows, then Amazon S3 returns the HTTP status code 200 OK and the +// data requested: +// - If-Match condition evaluates to true . +// - If-Unmodified-Since condition evaluates to false . For more information +// about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) +// . +// - If both of the If-None-Match and If-Modified-Since headers are present in +// the request as follows, then Amazon S3 returns the HTTP status code 304 Not +// Modified : +// - If-None-Match condition evaluates to false . +// - If-Modified-Since condition evaluates to true . For more information about +// conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following actions are +// related to GetObjectAttributes : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// - GetObjectLegalHold (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) +// - GetObjectLockConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) +// - GetObjectRetention (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) +// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// - HeadObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttributesInput, optFns ...func(*Options)) (*GetObjectAttributesOutput, error) { + if params == nil { + params = &GetObjectAttributesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectAttributes", params, optFns, c.addOperationGetObjectAttributesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectAttributesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectAttributesInput struct { + + // The name of the bucket that contains the object. Directory buckets - When you + // use this operation with a directory bucket, you must use virtual-hosted-style + // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . + // Path-style requests are not supported. Directory bucket names must be unique in + // the chosen Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. 
When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The object key. + // + // This member is required. + Key *string + + // Specifies the fields at the root level that you want returned in the response. + // Fields that you do not specify are not returned. + // + // This member is required. + ObjectAttributes []types.ObjectAttributes + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Sets the maximum number of parts to return. + MaxParts *int32 + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded; Amazon S3 does not store the encryption key. The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported for directory buckets. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. 
This functionality is not + // supported for directory buckets. + SSECustomerKeyMD5 *string + + // The version ID used to reference a specific version of the object. S3 + // Versioning isn't enabled and supported for directory buckets. For this API + // operation, only the null value of the version ID is supported by directory + // buckets. You can only specify null to the versionId query parameter in the + // request. + VersionId *string + + noSmithyDocumentSerde +} + +func (in *GetObjectAttributesInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type GetObjectAttributesOutput struct { + + // The checksum or digest of the object. + Checksum *types.Checksum + + // Specifies whether the object retrieved was ( true ) or was not ( false ) a + // delete marker. If false , this response header does not appear in the response. + // This functionality is not supported for directory buckets. + DeleteMarker *bool + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL. + ETag *string + + // The creation date of the object. + LastModified *time.Time + + // A collection of parts associated with a multipart upload. + ObjectParts *types.GetObjectAttributesParts + + // The size of the object in bytes. + ObjectSize *int64 + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Provides the storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. For more + // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // . Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. + StorageClass types.StorageClass + + // The version ID of the object. This functionality is not supported for directory + // buckets. + VersionId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAttributes{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAttributes{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetObjectAttributesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAttributes(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetObjectAttributesUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetObjectAttributesInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetObjectAttributes(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetObjectAttributes", + } +} + +// getGetObjectAttributesBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetObjectAttributesBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectAttributesInput) + if 
in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectAttributesUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectAttributesBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go new file mode 100644 index 000000000..548f5e1cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go @@ -0,0 +1,227 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Gets an object's current +// legal hold status. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . This functionality is not supported for Amazon S3 on Outposts. The following +// action is related to GetObjectLegalHold : +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) { + if params == nil { + params = &GetObjectLegalHoldInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectLegalHold", params, optFns, c.addOperationGetObjectLegalHoldMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectLegalHoldOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectLegalHoldInput struct { + + // The bucket name containing the object whose legal hold status you want to + // retrieve. Access points - When you use this action with an access point, you + // must provide the alias of the access point in place of the bucket name or + // specify the access point ARN. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key name for the object whose legal hold status you want to retrieve. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. 
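Outside the generated file, a hypothetical sketch of the GetObjectAttributes operation vendored above: it fetches only the requested attribute fields (here ETag and size) without transferring the object body the way GetObject would. Package and helper names are placeholders.

package s3util

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// objectETagAndSize asks S3 for just two attributes of one object.
func objectETagAndSize(ctx context.Context, client *s3.Client, bucket, key string) (string, int64, error) {
	out, err := client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		// Only the attributes listed here are populated in the response.
		ObjectAttributes: []types.ObjectAttributes{
			types.ObjectAttributesEtag,
			types.ObjectAttributesObjectSize,
		},
	})
	if err != nil {
		return "", 0, err
	}
	return aws.ToString(out.ETag), aws.ToInt64(out.ObjectSize), nil
}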
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // The version ID of the object whose legal hold status you want to retrieve. + VersionId *string + + noSmithyDocumentSerde +} + +func (in *GetObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type GetObjectLegalHoldOutput struct { + + // The current legal hold status for the specified object. + LegalHold *types.ObjectLockLegalHold + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLegalHold{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLegalHold{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectLegalHold"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetObjectLegalHoldValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLegalHold(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = 
addGetObjectLegalHoldUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetObjectLegalHoldInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetObjectLegalHold", + } +} + +// getGetObjectLegalHoldBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetObjectLegalHoldBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectLegalHoldInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectLegalHoldBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go new file mode 100644 index 000000000..e8e2fbd9f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go @@ -0,0 +1,210 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Gets the Object Lock +// configuration for a bucket. The rule specified in the Object Lock configuration +// will be applied by default to every new object placed in the specified bucket. +// For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . 
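For orientation, a small hypothetical sketch of calling the GetObjectLegalHold operation vendored above and surfacing its ON/OFF status; as before, the package, bucket, and key names are placeholders and client is assumed to exist.

package s3util

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// objectLegalHoldStatus returns the object's current legal hold status,
// or the empty string if no legal hold configuration is present.
func objectLegalHoldStatus(ctx context.Context, client *s3.Client, bucket, key string) (types.ObjectLockLegalHoldStatus, error) {
	out, err := client.GetObjectLegalHold(ctx, &s3.GetObjectLegalHoldInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return "", err
	}
	if out.LegalHold == nil {
		return "", nil
	}
	return out.LegalHold.Status, nil
}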
The following action is related to GetObjectLockConfiguration : +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) { + if params == nil { + params = &GetObjectLockConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectLockConfiguration", params, optFns, c.addOperationGetObjectLockConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectLockConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectLockConfigurationInput struct { + + // The bucket whose Object Lock configuration you want to retrieve. Access points + // - When you use this action with an access point, you must provide the alias of + // the access point in place of the bucket name or specify the access point ARN. + // When using the access point ARN, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type GetObjectLockConfigurationOutput struct { + + // The specified bucket's Object Lock configuration. + ObjectLockConfiguration *types.ObjectLockConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLockConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLockConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectLockConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetObjectLockConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLockConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetObjectLockConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetObjectLockConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetObjectLockConfiguration", + } +} + +// getGetObjectLockConfigurationBucketMember returns a pointer to string denoting +// a provided bucket member valueand a boolean indicating if the input has a +// modeled bucket name, +func getGetObjectLockConfigurationBucketMember(input 
interface{}) (*string, bool) { + in := input.(*GetObjectLockConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectLockConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go new file mode 100644 index 000000000..b4daabf16 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go @@ -0,0 +1,227 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Retrieves an object's +// retention settings. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . This functionality is not supported for Amazon S3 on Outposts. The following +// action is related to GetObjectRetention : +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) { + if params == nil { + params = &GetObjectRetentionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectRetention", params, optFns, c.addOperationGetObjectRetentionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectRetentionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectRetentionInput struct { + + // The bucket name containing the object whose retention settings you want to + // retrieve. Access points - When you use this action with an access point, you + // must provide the alias of the access point in place of the bucket name or + // specify the access point ARN. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key name for the object whose retention settings you want to retrieve. + // + // This member is required. 
+ Key *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // The version ID for the object whose retention settings you want to retrieve. + VersionId *string + + noSmithyDocumentSerde +} + +func (in *GetObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type GetObjectRetentionOutput struct { + + // The container element for an object's retention settings. + Retention *types.ObjectLockRetention + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectRetention{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectRetention{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectRetention"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetObjectRetentionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectRetention(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = 
addRecursionDetection(stack); err != nil { + return err + } + if err = addGetObjectRetentionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetObjectRetentionInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetObjectRetention", + } +} + +// getGetObjectRetentionBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetObjectRetentionBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectRetentionInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectRetentionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go new file mode 100644 index 000000000..dc15914a0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go @@ -0,0 +1,248 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns the tag-set of an +// object. You send the GET request against the tagging subresource associated with +// the object. To use this operation, you must have permission to perform the +// s3:GetObjectTagging action. By default, the GET action returns information about +// current version of an object. For a versioned bucket, you can have multiple +// versions of an object in your bucket. To retrieve tags of any other version, use +// the versionId query parameter. You also need permission for the +// s3:GetObjectVersionTagging action. By default, the bucket owner has this +// permission and can grant this permission to others. 
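To round out the Object Lock operations, a hypothetical sketch of the GetObjectRetention call vendored above, printing the retention mode and retain-until date when one is configured; names are again placeholders rather than code from this change.

package s3util

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// describeRetention prints the object's retention mode and expiry, if any.
func describeRetention(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.GetObjectRetention(ctx, &s3.GetObjectRetentionInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	if out.Retention == nil {
		fmt.Println("no retention configured")
		return nil
	}
	fmt.Println("mode:", out.Retention.Mode,
		"retain until:", aws.ToTime(out.Retention.RetainUntilDate))
	return nil
}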
For information about the +// Amazon S3 object tagging feature, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) +// . The following actions are related to GetObjectTagging : +// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) { + if params == nil { + params = &GetObjectTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectTagging", params, optFns, c.addOperationGetObjectTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectTaggingInput struct { + + // The bucket name containing the object for which to get the tagging information. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + // The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which to get the tagging information. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. 
+ RequestPayer types.RequestPayer + + // The versionId of the object for which to get the tagging information. + VersionId *string + + noSmithyDocumentSerde +} + +func (in *GetObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type GetObjectTaggingOutput struct { + + // Contains the tag set. + // + // This member is required. + TagSet []types.Tag + + // The versionId of the object for which you got the tagging information. + VersionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTagging{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectTagging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetObjectTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetObjectTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetObjectTaggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func 
newServiceMetadataMiddleware_opGetObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetObjectTagging", + } +} + +// getGetObjectTaggingBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetObjectTaggingBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectTaggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectTaggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go new file mode 100644 index 000000000..9fc83178e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go @@ -0,0 +1,220 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" +) + +// This operation is not supported by directory buckets. Returns torrent files +// from a bucket. BitTorrent can save you bandwidth when you're distributing large +// files. You can get torrent only for objects that are less than 5 GB in size, and +// that are not encrypted using server-side encryption with a customer-provided +// encryption key. To use GET, you must have READ access to the object. This +// functionality is not supported for Amazon S3 on Outposts. The following action +// is related to GetObjectTorrent : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) { + if params == nil { + params = &GetObjectTorrentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectTorrent", params, optFns, c.addOperationGetObjectTorrentMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectTorrentOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectTorrentInput struct { + + // The name of the bucket containing the object for which to get the torrent files. + // + // This member is required. + Bucket *string + + // The object key for which to get the information. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. 
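A similarly hedged sketch for GetObjectTagging, whose generated plumbing is completed just above. It reuses an already constructed *s3.Client (as built in the previous sketch); names are illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printObjectTags lists the tag set of the current version of an object.
// Supplying VersionId in the input reads the tags of an older version
// instead, which additionally requires s3:GetObjectVersionTagging.
func printObjectTags(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	for _, t := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.ToString(t.Key), aws.ToString(t.Value))
	}
	return nil
}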
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + noSmithyDocumentSerde +} + +func (in *GetObjectTorrentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type GetObjectTorrentOutput struct { + + // A Bencoded dictionary as defined by the BitTorrent specification + Body io.ReadCloser + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTorrent{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTorrent{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectTorrent"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetObjectTorrentValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTorrent(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetObjectTorrentUpdateEndpoint(stack, options); 
err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetObjectTorrentInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetObjectTorrent(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetObjectTorrent", + } +} + +// getGetObjectTorrentBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetObjectTorrentBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectTorrentInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectTorrentUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectTorrentBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go new file mode 100644 index 000000000..3689a4e16 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go @@ -0,0 +1,214 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Retrieves the +// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, +// you must have the s3:GetBucketPublicAccessBlock permission. For more +// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or +// an object, it checks the PublicAccessBlock configuration for both the bucket +// (or the bucket that contains the object) and the bucket owner's account. If the +// PublicAccessBlock settings are different between the bucket and the account, +// Amazon S3 uses the most restrictive combination of the bucket-level and +// account-level settings. 
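GetObjectTorrent, completed above, differs from the other read operations in that its output carries a streaming Body the caller must close. A minimal sketch under the same assumptions as the earlier examples (client already constructed, names illustrative):

package main

import (
	"context"
	"io"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// saveTorrent writes the .torrent metadata for an object to a local file.
// Per the operation's documentation, this only works for unencrypted
// objects smaller than 5 GB.
func saveTorrent(ctx context.Context, client *s3.Client, bucket, key, dest string) error {
	out, err := client.GetObjectTorrent(ctx, &s3.GetObjectTorrentInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	defer out.Body.Close() // Body is a stream; always close it

	f, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, out.Body)
	return err
}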
For more information about when Amazon S3 considers a +// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// . The following operations are related to GetPublicAccessBlock : +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) { + if params == nil { + params = &GetPublicAccessBlockInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetPublicAccessBlock", params, optFns, c.addOperationGetPublicAccessBlockMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetPublicAccessBlockOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetPublicAccessBlockInput struct { + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want + // to retrieve. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetPublicAccessBlockOutput struct { + + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. + PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetPublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetPublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetPublicAccessBlock"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpGetPublicAccessBlockValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetPublicAccessBlock(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetPublicAccessBlockUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *GetPublicAccessBlockInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetPublicAccessBlock", + } +} + +// getGetPublicAccessBlockBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetPublicAccessBlockBucketMember(input interface{}) (*string, bool) { + in := input.(*GetPublicAccessBlockInput) 
+ if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetPublicAccessBlockBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go new file mode 100644 index 000000000..5f5958916 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go @@ -0,0 +1,646 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "errors" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithywaiter "github.com/aws/smithy-go/waiter" + "time" +) + +// You can use this operation to determine if a bucket exists and if you have +// permission to access it. The action returns a 200 OK if the bucket exists and +// you have permission to access it. If the bucket does not exist or you do not +// have permission to access it, the HEAD request returns a generic 400 Bad Request +// , 403 Forbidden or 404 Not Found code. A message body is not included, so you +// cannot determine the exception beyond these HTTP response codes. Directory +// buckets - You must make requests for this API operation to the Zonal endpoint. +// These endpoints support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests +// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Authentication and authorization All HeadBucket +// requests must be authenticated and signed by using IAM credentials (access key +// ID and secret access key for the IAM identities). All headers with the x-amz- +// prefix, including x-amz-copy-source , must be signed. For more information, see +// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) +// . Directory bucket - You must use IAM credentials to authenticate and authorize +// your access to the HeadBucket API operation, instead of using the temporary +// security credentials through the CreateSession API operation. Amazon Web +// Services CLI or SDKs handles authentication and authorization on your behalf. +// Permissions +// - General purpose bucket permissions - To use this operation, you must have +// permissions to perform the s3:ListBucket action. The bucket owner has this +// permission by default and can grant this permission to others. 
For more +// information about permissions, see Managing access permissions to your Amazon +// S3 resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - You must have the s3express:CreateSession +// permission in the Action element of a policy. By default, the session is in +// the ReadWrite mode. If you want to restrict the access, you can explicitly set +// the s3express:SessionMode condition key to ReadOnly on the bucket. For more +// information about example bucket policies, see Example bucket policies for S3 +// Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . +func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) { + if params == nil { + params = &HeadBucketInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "HeadBucket", params, optFns, c.addOperationHeadBucketMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*HeadBucketOutput) + out.ResultMetadata = metadata + return out, nil +} + +type HeadBucketInput struct { + + // The bucket name. Directory buckets - When you use this operation with a + // directory bucket, you must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Object Lambda access points - When you use this API + // operation with an Object Lambda access point, provide the alias of the Object + // Lambda access point in place of the bucket name. If the Object Lambda access + // point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . 
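Stepping back to the GetPublicAccessBlock operation vendored a little earlier: a hedged sketch of inspecting the four bucket-level block-public-access flags. The flags are pointers, so they are read through aws.ToBool; the helper name is illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printPublicAccessBlock shows the bucket-level configuration. Note that
// S3 combines this with the account-level settings and enforces the most
// restrictive result, as the operation's documentation above describes.
func printPublicAccessBlock(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetPublicAccessBlock(ctx, &s3.GetPublicAccessBlockInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	cfg := out.PublicAccessBlockConfiguration
	fmt.Printf("BlockPublicAcls=%v IgnorePublicAcls=%v BlockPublicPolicy=%v RestrictPublicBuckets=%v\n",
		aws.ToBool(cfg.BlockPublicAcls), aws.ToBool(cfg.IgnorePublicAcls),
		aws.ToBool(cfg.BlockPublicPolicy), aws.ToBool(cfg.RestrictPublicBuckets))
	return nil
}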
Access points and Object Lambda access points are not supported by directory + // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type HeadBucketOutput struct { + + // Indicates whether the bucket name used in the request is an access point alias. + // This functionality is not supported for directory buckets. + AccessPointAlias *bool + + // The name of the location where the bucket will be created. For directory + // buckets, the AZ ID of the Availability Zone where the bucket is created. An + // example AZ ID value is usw2-az1 . This functionality is only supported by + // directory buckets. + BucketLocationName *string + + // The type of location where the bucket is created. This functionality is only + // supported by directory buckets. + BucketLocationType types.LocationType + + // The Region that the bucket is located. This functionality is not supported for + // directory buckets. + BucketRegion *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpHeadBucket{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadBucket{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "HeadBucket"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpHeadBucketValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadBucket(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addHeadBucketUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *HeadBucketInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// HeadBucketAPIClient is a client that implements the HeadBucket operation. +type HeadBucketAPIClient interface { + HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) +} + +var _ HeadBucketAPIClient = (*Client)(nil) + +// BucketExistsWaiterOptions are waiter options for BucketExistsWaiter +type BucketExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. 
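Before the waiter types that follow, a sketch of the plain HeadBucket call. Because a failed HEAD carries no response body, the error only distinguishes the broad 400/403/404 classes; the 404 case is modeled as types.NotFound, the same type the generated waiters match on. Names are illustrative:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// bucketAccessible reports whether the bucket exists and is reachable with
// the caller's credentials, printing its region when it is.
func bucketAccessible(ctx context.Context, client *s3.Client, bucket string) (bool, error) {
	out, err := client.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: aws.String(bucket)})
	if err != nil {
		var nf *types.NotFound
		if errors.As(err, &nf) {
			return false, nil // 404: bucket does not exist
		}
		return false, err // 400/403 or a transport error
	}
	fmt.Println("bucket region:", aws.ToString(out.BucketRegion))
	return true, nil
}

The BucketExists and BucketNotExists waiters defined next wrap exactly this call in a retrying loop.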
+ // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // BucketExistsWaiter will use default minimum delay of 5 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, BucketExistsWaiter will use default max delay of 120 seconds. Note + // that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) +} + +// BucketExistsWaiter defines the waiters for BucketExists +type BucketExistsWaiter struct { + client HeadBucketAPIClient + + options BucketExistsWaiterOptions +} + +// NewBucketExistsWaiter constructs a BucketExistsWaiter. +func NewBucketExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketExistsWaiterOptions)) *BucketExistsWaiter { + options := BucketExistsWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = bucketExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &BucketExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for BucketExists waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *BucketExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for BucketExists waiter and returns the +// output of the successful operation. The maxWaitDur is the maximum wait duration +// the waiter will wait. The maxWaitDur is required and must be greater than zero. 
+func (w *BucketExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketExistsWaiterOptions)) (*HeadBucketOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for BucketExists waiter") +} + +func bucketExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) { + + if err == nil { + return false, nil + } + + if err != nil { + var errorType *types.NotFound + if errors.As(err, &errorType) { + return true, nil + } + } + + return true, nil +} + +// BucketNotExistsWaiterOptions are waiter options for BucketNotExistsWaiter +type BucketNotExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // BucketNotExistsWaiter will use default minimum delay of 5 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. 
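A short sketch of driving the BucketExistsWaiter defined above, for example to block until a just-created bucket becomes visible. The two-minute budget is an arbitrary illustrative choice; the waiter applies the exponential backoff between attempts shown in WaitForOutput, with the 5-second minimum and 120-second maximum delays set by the constructor:

package main

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// waitForBucket polls HeadBucket until the bucket exists or the overall
// deadline passes, logging each retry attempt.
func waitForBucket(ctx context.Context, client *s3.Client, bucket string) error {
	waiter := s3.NewBucketExistsWaiter(client, func(o *s3.BucketExistsWaiterOptions) {
		o.LogWaitAttempts = true
	})
	return waiter.Wait(ctx, &s3.HeadBucketInput{Bucket: aws.String(bucket)}, 2*time.Minute)
}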
If unset or + // set to zero, BucketNotExistsWaiter will use default max delay of 120 seconds. + // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) +} + +// BucketNotExistsWaiter defines the waiters for BucketNotExists +type BucketNotExistsWaiter struct { + client HeadBucketAPIClient + + options BucketNotExistsWaiterOptions +} + +// NewBucketNotExistsWaiter constructs a BucketNotExistsWaiter. +func NewBucketNotExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketNotExistsWaiterOptions)) *BucketNotExistsWaiter { + options := BucketNotExistsWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = bucketNotExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &BucketNotExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for BucketNotExists waiter. The maxWaitDur is +// the maximum wait duration the waiter will wait. The maxWaitDur is required and +// must be greater than zero. +func (w *BucketNotExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for BucketNotExists waiter and returns +// the output of the successful operation. The maxWaitDur is the maximum wait +// duration the waiter will wait. The maxWaitDur is required and must be greater +// than zero. +func (w *BucketNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) (*HeadBucketOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) 
+ apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for BucketNotExists waiter") +} + +func bucketNotExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) { + + if err != nil { + var errorType *types.NotFound + if errors.As(err, &errorType) { + return false, nil + } + } + + return true, nil +} + +func newServiceMetadataMiddleware_opHeadBucket(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "HeadBucket", + } +} + +// getHeadBucketBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getHeadBucketBucketMember(input interface{}) (*string, bool) { + in := input.(*HeadBucketInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addHeadBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getHeadBucketBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignHeadBucket is used to generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. 
+func (c *PresignClient) PresignHeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &HeadBucketInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "HeadBucket", params, clientOptFns, + c.client.addOperationHeadBucketMiddlewares, + presignConverter(options).convertToPresignMiddleware, + addHeadBucketPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addHeadBucketPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go new file mode 100644 index 000000000..5b7e9b6c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go @@ -0,0 +1,971 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "errors" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithywaiter "github.com/aws/smithy-go/waiter" + "time" +) + +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're interested only in an object's +// metadata. A HEAD request has the same options as a GET operation on an object. +// The response is identical to the GET response except that there is no response +// body. Because of this, if the HEAD request generates an error, it returns a +// generic code, such as 400 Bad Request , 403 Forbidden , 404 Not Found , 405 +// Method Not Allowed , 412 Precondition Failed , or 304 Not Modified . It's not +// possible to retrieve the exact exception of these error codes. Request headers +// are limited to 8 KB in size. For more information, see Common Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html) +// . Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - To use HEAD , you must have the +// s3:GetObject permission. You need the relevant read object (or version) +// permission for this operation. For more information, see Actions, resources, +// and condition keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) +// in the Amazon S3 User Guide. 
If the object you request doesn't exist, the error +// that Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error. +// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden error. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Encryption Encryption request headers, like x-amz-server-side-encryption , +// should not be sent for HEAD requests if your object uses server-side encryption +// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side +// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side +// encryption with Amazon S3 managed encryption keys (SSE-S3). The +// x-amz-server-side-encryption header is used when you PUT an object to S3 and +// want to specify the encryption method. If you include this header in a HEAD +// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad +// Request error. It's because the encryption method can't be changed when you +// retrieve the object. If you encrypt an object by using server-side encryption +// with customer-provided encryption keys (SSE-C) when you store the object in +// Amazon S3, then when you retrieve the metadata from the object, you must use the +// following headers to provide the encryption key for the server to be able to +// retrieve the object's metadata. The headers are: +// - x-amz-server-side-encryption-customer-algorithm +// - x-amz-server-side-encryption-customer-key +// - x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using +// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. Directory bucket permissions - For directory +// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( +// AES256 ) is supported. Versioning +// +// - If the current version of the object is a delete marker, Amazon S3 behaves +// as if the object was deleted and includes x-amz-delete-marker: true in the +// response. +// +// - If the specified version is a delete marker, the response returns a 405 +// Method Not Allowed error and the Last-Modified: timestamp response header. +// +// - Directory buckets - Delete marker is not supported by directory buckets. 
+// +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID is +// supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following actions are +// related to HeadObject : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) { + if params == nil { + params = &HeadObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "HeadObject", params, optFns, c.addOperationHeadObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*HeadObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type HeadObjectInput struct { + + // The name of the bucket that contains the object. Directory buckets - When you + // use this operation with a directory bucket, you must use virtual-hosted-style + // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . + // Path-style requests are not supported. Directory bucket names must be unique in + // the chosen Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The object key. + // + // This member is required. + Key *string + + // To retrieve the checksum, this parameter must be enabled. 
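Looping back to the PresignHeadBucket helper vendored at the end of the HeadBucket file: a hedged sketch of producing a presigned HEAD request whose URL can be issued without AWS credentials. The bucket name and the 15-minute expiry are illustrative:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	presigner := s3.NewPresignClient(s3.NewFromConfig(cfg))

	// The returned request carries the signed URL, the HTTP method, and
	// any headers the caller must send along with it.
	req, err := presigner.PresignHeadBucket(context.TODO(),
		&s3.HeadBucketInput{Bucket: aws.String("example-bucket")}, // hypothetical
		s3.WithPresignExpires(15*time.Minute),
	)
	if err != nil {
		log.Fatalf("presign: %v", err)
	}
	fmt.Println(req.Method, req.URL)
}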
In addition, if you + // enable ChecksumMode and the object is encrypted with Amazon Web Services Key + // Management Service (Amazon Web Services KMS), you must have permission to use + // the kms:Decrypt action for the request to succeed. + ChecksumMode types.ChecksumMode + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Return the object only if its entity tag (ETag) is the same as the one + // specified; otherwise, return a 412 (precondition failed) error. If both of the + // If-Match and If-Unmodified-Since headers are present in the request as follows: + // - If-Match condition evaluates to true , and; + // - If-Unmodified-Since condition evaluates to false ; + // Then Amazon S3 returns 200 OK and the data requested. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + IfMatch *string + + // Return the object only if it has been modified since the specified time; + // otherwise, return a 304 (not modified) error. If both of the If-None-Match and + // If-Modified-Since headers are present in the request as follows: + // - If-None-Match condition evaluates to false , and; + // - If-Modified-Since condition evaluates to true ; + // Then Amazon S3 returns the 304 Not Modified response code. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + IfModifiedSince *time.Time + + // Return the object only if its entity tag (ETag) is different from the one + // specified; otherwise, return a 304 (not modified) error. If both of the + // If-None-Match and If-Modified-Since headers are present in the request as + // follows: + // - If-None-Match condition evaluates to false , and; + // - If-Modified-Since condition evaluates to true ; + // Then Amazon S3 returns the 304 Not Modified response code. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + IfNoneMatch *string + + // Return the object only if it has not been modified since the specified time; + // otherwise, return a 412 (precondition failed) error. If both of the If-Match + // and If-Unmodified-Since headers are present in the request as follows: + // - If-Match condition evaluates to true , and; + // - If-Unmodified-Since condition evaluates to false ; + // Then Amazon S3 returns 200 OK and the data requested. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + IfUnmodifiedSince *time.Time + + // Part number of the object being read. This is a positive integer between 1 and + // 10,000. Effectively performs a 'ranged' HEAD request for the part specified. + // Useful querying about the size of the part and the number of parts in this + // object. + PartNumber *int32 + + // HeadObject returns only the metadata for an object. If the Range is + // satisfiable, only the ContentLength is affected in the response. If the Range + // is not satisfiable, S3 returns a 416 - Requested Range Not Satisfiable error. + Range *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
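[Editor's note] The If-* members above follow RFC 7232 semantics; for HeadObject, a 304 Not Modified (or 412 Precondition Failed) surfaces as an operation error rather than as output. A hedged revalidation sketch, not part of the vendored change — the bucket, key, and cached ETag are placeholders:

package example

import (
	"context"
	"errors"
	"net/http"

	"github.com/aws/aws-sdk-go-v2/aws"
	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// revalidate re-checks a cached object's metadata. A nil, nil return means
// the cache is still fresh (S3 answered 304 to the If-None-Match header).
func revalidate(ctx context.Context, client *s3.Client, cachedETag string) (*s3.HeadObjectOutput, error) {
	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket:      aws.String("example-bucket"), // placeholder
		Key:         aws.String("example-key"),    // placeholder
		IfNoneMatch: aws.String(cachedETag),
	})
	if err != nil {
		// A 304 arrives as an operation error; inspect the HTTP status
		// to tell "still fresh" apart from a real failure.
		var respErr *awshttp.ResponseError
		if errors.As(err, &respErr) && respErr.HTTPStatusCode() == http.StatusNotModified {
			return nil, nil
		}
		return nil, err
	}
	return out, nil // metadata changed; out carries the new ETag
}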
If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded; Amazon S3 does not store the encryption key. The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported for directory buckets. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. This functionality is not + // supported for directory buckets. + SSECustomerKeyMD5 *string + + // Version ID used to reference a specific version of the object. For directory + // buckets in this API operation, only the null value of the version ID is + // supported. + VersionId *string + + noSmithyDocumentSerde +} + +func (in *HeadObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + +type HeadObjectOutput struct { + + // Indicates that a range of bytes was specified. + AcceptRanges *string + + // The archive state of the head object. This functionality is not supported for + // directory buckets. + ArchiveStatus types.ArchiveStatus + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not + // supported for directory buckets. + BucketKeyEnabled *bool + + // Specifies caching behavior along the request/reply chain. + CacheControl *string + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. 
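[Editor's note] As the SSE-C members above document, the key material used when the object was stored must be resupplied on HEAD. A sketch under the assumption that the caller holds the raw 256-bit key; base64-encoding the key and its MD5 digest matches the S3 REST contract for these headers, and the helper name is illustrative:

package example

import (
	"context"
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// headSSEC resupplies the customer-provided key so S3 can decrypt enough to
// return metadata. rawKey is the caller's 32-byte AES-256 key.
func headSSEC(ctx context.Context, client *s3.Client, bucket, key string, rawKey []byte) (*s3.HeadObjectOutput, error) {
	digest := md5.Sum(rawKey)
	return client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket:               aws.String(bucket),
		Key:                  aws.String(key),
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(rawKey)),
		SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(digest[:])),
	})
}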
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Specifies presentational information for the object. + ContentDisposition *string + + // Indicates what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // Size of the body in bytes. + ContentLength *int64 + + // A standard MIME type describing the format of the object data. + ContentType *string + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. This + // functionality is not supported for directory buckets. + DeleteMarker *bool + + // An entity tag (ETag) is an opaque identifier assigned by a web server to a + // specific version of a resource found at a URL. + ETag *string + + // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) + // ), the response includes this header. It includes the expiry-date and rule-id + // key-value pairs providing object expiration information. The value of the + // rule-id is URL-encoded. This functionality is not supported for directory + // buckets. + Expiration *string + + // The date and time at which the object is no longer cacheable. + Expires *time.Time + + // Date and time when the object was last modified. + LastModified *time.Time + + // A map of metadata to store with the object in S3. + // + // Map keys will be normalized to lower-case. + Metadata map[string]string + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. 
For example, using SOAP, you + // can create metadata whose values are not legal HTTP headers. This functionality + // is not supported for directory buckets. + MissingMeta *int32 + + // Specifies whether a legal hold is in effect for this object. This header is + // only returned if the requester has the s3:GetObjectLegalHold permission. This + // header is not returned if the specified version of this object has never had a + // legal hold applied. For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) + // . This functionality is not supported for directory buckets. + ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus + + // The Object Lock mode, if any, that's in effect for this object. This header is + // only returned if the requester has the s3:GetObjectRetention permission. For + // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) + // . This functionality is not supported for directory buckets. + ObjectLockMode types.ObjectLockMode + + // The date and time when the Object Lock retention period expires. This header is + // only returned if the requester has the s3:GetObjectRetention permission. This + // functionality is not supported for directory buckets. + ObjectLockRetainUntilDate *time.Time + + // The count of parts this object has. This value is only returned if you specify + // partNumber in your request and the object was uploaded as a multipart upload. + PartsCount *int32 + + // Amazon S3 can return this header if your request involves a bucket that is + // either a source or a destination in a replication rule. In replication, you have + // a source bucket on which you configure replication and destination bucket or + // buckets where Amazon S3 stores object replicas. When you request an object ( + // GetObject ) or object metadata ( HeadObject ) from these buckets, Amazon S3 will + // return the x-amz-replication-status header in the response as follows: + // - If requesting an object from the source bucket, Amazon S3 will return the + // x-amz-replication-status header if the object in your request is eligible for + // replication. For example, suppose that in your replication configuration, you + // specify object prefix TaxDocs requesting Amazon S3 to replicate objects with + // key prefix TaxDocs . Any objects you upload with this key name prefix, for + // example TaxDocs/document1.pdf , are eligible for replication. For any object + // request with this key name prefix, Amazon S3 will return the + // x-amz-replication-status header with value PENDING, COMPLETED or FAILED + // indicating object replication status. + // - If requesting an object from a destination bucket, Amazon S3 will return + // the x-amz-replication-status header with value REPLICA if the object in your + // request is a replica that Amazon S3 created and there is no replica modification + // replication in progress. + // - When replicating objects to multiple destination buckets, the + // x-amz-replication-status header acts differently. The header of the source + // object will only return a value of COMPLETED when replication is successful to + // all destinations. The header will remain at value PENDING until replication has + // completed for all destinations. If one or more destinations fails replication + // the header will return FAILED. 
+ // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // . This functionality is not supported for directory buckets. + ReplicationStatus types.ReplicationStatus + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // If the object is an archived object (an object whose storage class is GLACIER), + // the response includes this header if either the archive restoration is in + // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) + // or an archive copy is already restored. If an archive copy is already restored, + // the header value indicates when Amazon S3 is scheduled to delete the object + // copy. For example: x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 + // Dec 2012 00:00:00 GMT" If the object restoration is in progress, the header + // returns the value ongoing-request="true" . For more information about archiving + // objects, see Transitioning Objects: General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations) + // . This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. + Restore *string + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is + // supported. + ServerSideEncryption types.ServerSideEncryption + + // Provides storage class information of the object. Amazon S3 returns this header + // for all objects except for S3 Standard storage class objects. For more + // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // . Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. + StorageClass types.StorageClass + + // Version ID of the object. This functionality is not supported for directory + // buckets. + VersionId *string + + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. Amazon S3 stores the + // value of this header in the object metadata. This functionality is not supported + // for directory buckets. 
+ WebsiteRedirectLocation *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpHeadObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadObject{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "HeadObject"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpHeadObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addHeadObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *HeadObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// HeadObjectAPIClient is a client that implements the HeadObject operation. +type HeadObjectAPIClient interface { + HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error) +} + +var _ HeadObjectAPIClient = (*Client)(nil) + +// ObjectExistsWaiterOptions are waiter options for ObjectExistsWaiter +type ObjectExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. 
Use functional options on operation call to + // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // ObjectExistsWaiter will use default minimum delay of 5 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, ObjectExistsWaiter will use default max delay of 120 seconds. Note + // that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) +} + +// ObjectExistsWaiter defines the waiters for ObjectExists +type ObjectExistsWaiter struct { + client HeadObjectAPIClient + + options ObjectExistsWaiterOptions +} + +// NewObjectExistsWaiter constructs a ObjectExistsWaiter. +func NewObjectExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectExistsWaiterOptions)) *ObjectExistsWaiter { + options := ObjectExistsWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = objectExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &ObjectExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for ObjectExists waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *ObjectExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for ObjectExists waiter and returns the +// output of the successful operation. The maxWaitDur is the maximum wait duration +// the waiter will wait. The maxWaitDur is required and must be greater than zero. 
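[Editor's note] A minimal sketch of driving the waiter defined in this file — not part of the vendored change. The bucket and key are placeholders, and the tightened MinDelay only illustrates the options hook:

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func waitForUpload(ctx context.Context) error {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return err
	}
	client := s3.NewFromConfig(cfg)

	waiter := s3.NewObjectExistsWaiter(client, func(o *s3.ObjectExistsWaiterOptions) {
		o.MinDelay = 2 * time.Second // poll faster than the 5s default
		o.LogWaitAttempts = true
	})

	// Wait polls HeadObject with exponential backoff until the object is
	// visible or the two-minute budget is exhausted.
	return waiter.Wait(ctx, &s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	}, 2*time.Minute)
}

WaitForOutput behaves the same but also returns the final HeadObjectOutput, which is useful when the caller needs the object's size or ETag as soon as it appears.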
+func (w *ObjectExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectExistsWaiterOptions)) (*HeadObjectOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.HeadObject(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for ObjectExists waiter") +} + +func objectExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) { + + if err == nil { + return false, nil + } + + if err != nil { + var errorType *types.NotFound + if errors.As(err, &errorType) { + return true, nil + } + } + + return true, nil +} + +// ObjectNotExistsWaiterOptions are waiter options for ObjectNotExistsWaiter +type ObjectNotExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // ObjectNotExistsWaiter will use default minimum delay of 5 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. 
If unset or + // set to zero, ObjectNotExistsWaiter will use default max delay of 120 seconds. + // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) +} + +// ObjectNotExistsWaiter defines the waiters for ObjectNotExists +type ObjectNotExistsWaiter struct { + client HeadObjectAPIClient + + options ObjectNotExistsWaiterOptions +} + +// NewObjectNotExistsWaiter constructs a ObjectNotExistsWaiter. +func NewObjectNotExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectNotExistsWaiterOptions)) *ObjectNotExistsWaiter { + options := ObjectNotExistsWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = objectNotExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &ObjectNotExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for ObjectNotExists waiter. The maxWaitDur is +// the maximum wait duration the waiter will wait. The maxWaitDur is required and +// must be greater than zero. +func (w *ObjectNotExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for ObjectNotExists waiter and returns +// the output of the successful operation. The maxWaitDur is the maximum wait +// duration the waiter will wait. The maxWaitDur is required and must be greater +// than zero. +func (w *ObjectNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) (*HeadObjectOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) 
+ apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.HeadObject(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for ObjectNotExists waiter") +} + +func objectNotExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) { + + if err != nil { + var errorType *types.NotFound + if errors.As(err, &errorType) { + return false, nil + } + } + + return true, nil +} + +func newServiceMetadataMiddleware_opHeadObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "HeadObject", + } +} + +// getHeadObjectBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getHeadObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*HeadObjectInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addHeadObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getHeadObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignHeadObject is used to generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. 
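[Editor's note] Before the implementation, a sketch of using the presign client; the 15-minute expiry and the helper name are illustrative, not part of this changeset:

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// presignHead returns a URL whose holder can HEAD the object without
// credentials until the expiry elapses.
func presignHead(ctx context.Context, client *s3.Client, bucket, key string) (string, error) {
	presigner := s3.NewPresignClient(client)
	req, err := presigner.PresignHeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}, func(o *s3.PresignOptions) {
		o.Expires = 15 * time.Minute // validity window of the signed URL
	})
	if err != nil {
		return "", err
	}
	return req.URL, nil
}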
+func (c *PresignClient) PresignHeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &HeadObjectInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "HeadObject", params, clientOptFns, + c.client.addOperationHeadObjectMiddlewares, + presignConverter(options).convertToPresignMiddleware, + addHeadObjectPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addHeadObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go new file mode 100644 index 000000000..67b7571c1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go @@ -0,0 +1,235 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Lists the analytics +// configurations for the bucket. You can have up to 1,000 analytics configurations +// per bucket. This action supports list pagination and does not return more than +// 100 configurations at a time. You should always check the IsTruncated element +// in the response. If there are no more configurations to list, IsTruncated is +// set to false. If there are more configurations to list, IsTruncated is set to +// true, and there will be a value in NextContinuationToken . You use the +// NextContinuationToken value to continue the pagination of the list by passing +// the value in continuation-token in the request to GET the next page. To use +// this operation, you must have permissions to perform the +// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about Amazon S3 analytics feature, see Amazon S3 Analytics – +// Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// . 
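[Editor's note] The continuation-token protocol described above is driven by the caller — no paginator helper appears to be generated for this operation. A hedged sketch of walking all pages (the bucket name is a placeholder):

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listAllAnalytics feeds NextContinuationToken back into the request until
// IsTruncated is false, as the doc comment above prescribes.
func listAllAnalytics(ctx context.Context, client *s3.Client, bucket string) error {
	var token *string // nil requests the first page
	for {
		out, err := client.ListBucketAnalyticsConfigurations(ctx,
			&s3.ListBucketAnalyticsConfigurationsInput{
				Bucket:            aws.String(bucket),
				ContinuationToken: token,
			})
		if err != nil {
			return err
		}
		for _, ac := range out.AnalyticsConfigurationList {
			fmt.Println(aws.ToString(ac.Id))
		}
		if out.IsTruncated == nil || !*out.IsTruncated {
			return nil
		}
		token = out.NextContinuationToken
	}
}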
The following operations are related to ListBucketAnalyticsConfigurations : +// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) { + if params == nil { + params = &ListBucketAnalyticsConfigurationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListBucketAnalyticsConfigurations", params, optFns, c.addOperationListBucketAnalyticsConfigurationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListBucketAnalyticsConfigurationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListBucketAnalyticsConfigurationsInput struct { + + // The name of the bucket from which analytics configurations are retrieved. + // + // This member is required. + Bucket *string + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *ListBucketAnalyticsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type ListBucketAnalyticsConfigurationsOutput struct { + + // The list of analytics configurations for a bucket. + AnalyticsConfigurationList []types.AnalyticsConfiguration + + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string + + // Indicates whether the returned list of analytics configurations is complete. A + // value of true indicates that the list is not complete and the + // NextContinuationToken will be provided for a subsequent request. + IsTruncated *bool + + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken . The token is obfuscated and is not a usable value. + NextContinuationToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketAnalyticsConfigurations{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketAnalyticsConfigurations{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketAnalyticsConfigurations"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addListBucketAnalyticsConfigurationsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *ListBucketAnalyticsConfigurationsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListBucketAnalyticsConfigurations", + } +} + +// getListBucketAnalyticsConfigurationsBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a 
modeled bucket name, +func getListBucketAnalyticsConfigurationsBucketMember(input interface{}) (*string, bool) { + in := input.(*ListBucketAnalyticsConfigurationsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addListBucketAnalyticsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getListBucketAnalyticsConfigurationsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go new file mode 100644 index 000000000..729f87856 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go @@ -0,0 +1,229 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Lists the S3 +// Intelligent-Tiering configuration from the specified bucket. The S3 +// Intelligent-Tiering storage class is designed to optimize storage costs by +// automatically moving data to the most cost-effective storage access tier, +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput access +// tiers. To get the lowest storage cost on data that can be accessed in minutes to +// hours, you can choose to activate additional archiving capabilities. The S3 +// Intelligent-Tiering storage class is the ideal storage class for data with +// unknown, changing, or unpredictable access patterns, independent of object size +// or retention period. If the size of an object is less than 128 KB, it is not +// monitored and not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. For more information, see Storage class for +// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) +// . 
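[Editor's note] A short sketch of reading these configurations back (the bucket name is a placeholder); the same IsTruncated / NextContinuationToken loop shown for analytics configurations applies whenever the listing is truncated:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listTiering prints the ID and status of each Intelligent-Tiering
// configuration on the bucket (first page only).
func listTiering(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.ListBucketIntelligentTieringConfigurations(ctx,
		&s3.ListBucketIntelligentTieringConfigurationsInput{
			Bucket: aws.String(bucket),
		})
	if err != nil {
		return err
	}
	for _, tc := range out.IntelligentTieringConfigurationList {
		fmt.Println(aws.ToString(tc.Id), tc.Status)
	}
	return nil
}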
Operations related to ListBucketIntelligentTieringConfigurations include: +// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +func (c *Client) ListBucketIntelligentTieringConfigurations(ctx context.Context, params *ListBucketIntelligentTieringConfigurationsInput, optFns ...func(*Options)) (*ListBucketIntelligentTieringConfigurationsOutput, error) { + if params == nil { + params = &ListBucketIntelligentTieringConfigurationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListBucketIntelligentTieringConfigurations", params, optFns, c.addOperationListBucketIntelligentTieringConfigurationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListBucketIntelligentTieringConfigurationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListBucketIntelligentTieringConfigurationsInput struct { + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // This member is required. + Bucket *string + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string + + noSmithyDocumentSerde +} + +func (in *ListBucketIntelligentTieringConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type ListBucketIntelligentTieringConfigurationsOutput struct { + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string + + // The list of S3 Intelligent-Tiering configurations for a bucket. + IntelligentTieringConfigurationList []types.IntelligentTieringConfiguration + + // Indicates whether the returned list of analytics configurations is complete. A + // value of true indicates that the list is not complete and the + // NextContinuationToken will be provided for a subsequent request. + IsTruncated *bool + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketIntelligentTieringConfigurations{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketIntelligentTieringConfigurations"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *ListBucketIntelligentTieringConfigurationsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListBucketIntelligentTieringConfigurations", + } +} + +// getListBucketIntelligentTieringConfigurationsBucketMember returns a pointer 
to +// string denoting a provided bucket member valueand a boolean indicating if the +// input has a modeled bucket name, +func getListBucketIntelligentTieringConfigurationsBucketMember(input interface{}) (*string, bool) { + in := input.(*ListBucketIntelligentTieringConfigurationsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getListBucketIntelligentTieringConfigurationsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go new file mode 100644 index 000000000..6c879048c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go @@ -0,0 +1,236 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns a list of +// inventory configurations for the bucket. You can have up to 1,000 analytics +// configurations per bucket. This action supports list pagination and does not +// return more than 100 configurations at a time. Always check the IsTruncated +// element in the response. If there are no more configurations to list, +// IsTruncated is set to false. If there are more configurations to list, +// IsTruncated is set to true, and there is a value in NextContinuationToken . You +// use the NextContinuationToken value to continue the pagination of the list by +// passing the value in continuation-token in the request to GET the next page. To +// use this operation, you must have permissions to perform the +// s3:GetInventoryConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . 
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// The following operations are related to ListBucketInventoryConfigurations : +// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) { + if params == nil { + params = &ListBucketInventoryConfigurationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListBucketInventoryConfigurations", params, optFns, c.addOperationListBucketInventoryConfigurationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListBucketInventoryConfigurationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListBucketInventoryConfigurationsInput struct { + + // The name of the bucket containing the inventory configurations to retrieve. + // + // This member is required. + Bucket *string + + // The marker used to continue an inventory configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value that + // Amazon S3 understands. + ContinuationToken *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *ListBucketInventoryConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type ListBucketInventoryConfigurationsOutput struct { + + // If sent in the request, the marker that is used as a starting point for this + // inventory configuration list response. + ContinuationToken *string + + // The list of inventory configurations for a bucket. + InventoryConfigurationList []types.InventoryConfiguration + + // Tells whether the returned list of inventory configurations is complete. A + // value of true indicates that the list is not complete and the + // NextContinuationToken is provided for a subsequent request. + IsTruncated *bool + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string + + // Metadata pertaining to the operation's result. 
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketInventoryConfigurations{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketInventoryConfigurations{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketInventoryConfigurations"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addComputePayloadSHA256(stack); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketContextMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addOpListBucketInventoryConfigurationsValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketInventoryConfigurations(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addListBucketInventoryConfigurationsUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (v *ListBucketInventoryConfigurationsInput) bucket() (string, bool) {
+	if v.Bucket == nil {
+		return "", false
+	}
+	return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opListBucketInventoryConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "ListBucketInventoryConfigurations",
+	}
+}
+
+// getListBucketInventoryConfigurationsBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input has
+// a modeled bucket name.
+func getListBucketInventoryConfigurationsBucketMember(input interface{}) (*string, bool) {
+	in := input.(*ListBucketInventoryConfigurationsInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addListBucketInventoryConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getListBucketInventoryConfigurationsBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go
new file mode 100644
index 000000000..0b6ca9473
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go
@@ -0,0 +1,238 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported by directory buckets. Lists the metrics
+// configurations for the bucket. The metrics configurations are only for the
+// request metrics of the bucket and do not provide information on daily storage
+// metrics. You can have up to 1,000 configurations per bucket. This action
+// supports list pagination and does not return more than 100 configurations at a
+// time. Always check the IsTruncated element in the response. If there are no
+// more configurations to list, IsTruncated is set to false. If there are more
+// configurations to list, IsTruncated is set to true, and there is a value in
+// NextContinuationToken . You use the NextContinuationToken value to continue the
+// pagination of the list by passing the value in continuation-token in the
+// request to GET the next page. To use this operation, you must have permissions
+// to perform the s3:GetMetricsConfiguration action. The bucket owner has this
+// permission by default. The bucket owner can grant this permission to others. For
+// more information about permissions, see Permissions Related to Bucket
+// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// . For more information about metrics configurations and CloudWatch request
+// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
+// .
The following operations are related to ListBucketMetricsConfigurations : +// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) { + if params == nil { + params = &ListBucketMetricsConfigurationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListBucketMetricsConfigurations", params, optFns, c.addOperationListBucketMetricsConfigurationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListBucketMetricsConfigurationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListBucketMetricsConfigurationsInput struct { + + // The name of the bucket containing the metrics configurations to retrieve. + // + // This member is required. + Bucket *string + + // The marker that is used to continue a metrics configuration listing that has + // been truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value that + // Amazon S3 understands. + ContinuationToken *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *ListBucketMetricsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type ListBucketMetricsConfigurationsOutput struct { + + // The marker that is used as a starting point for this metrics configuration list + // response. This value is present if it was sent in the request. + ContinuationToken *string + + // Indicates whether the returned list of metrics configurations is complete. A + // value of true indicates that the list is not complete and the + // NextContinuationToken will be provided for a subsequent request. + IsTruncated *bool + + // The list of metrics configurations for a bucket. + MetricsConfigurationList []types.MetricsConfiguration + + // The marker used to continue a metrics configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value that + // Amazon S3 understands. + NextContinuationToken *string + + // Metadata pertaining to the operation's result. 
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketMetricsConfigurations{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketMetricsConfigurations{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketMetricsConfigurations"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addComputePayloadSHA256(stack); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketContextMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addOpListBucketMetricsConfigurationsValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketMetricsConfigurations(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addListBucketMetricsConfigurationsUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (v *ListBucketMetricsConfigurationsInput) bucket() (string, bool) {
+	if v.Bucket == nil {
+		return "", false
+	}
+	return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opListBucketMetricsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "ListBucketMetricsConfigurations",
+	}
+}
+
+// getListBucketMetricsConfigurationsBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input has
+// a modeled bucket name.
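+//
+// Editorial note, not generated text: this accessor and the
+// addListBucketMetricsConfigurationsUpdateEndpoint helper below wire the
+// operation into the shared S3 endpoint customizations (path-style addressing,
+// transfer acceleration, ARN-based buckets), letting s3cust.UpdateEndpoint read
+// the bucket name from the operation input without knowing its concrete type.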
+func getListBucketMetricsConfigurationsBucketMember(input interface{}) (*string, bool) { + in := input.(*ListBucketMetricsConfigurationsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addListBucketMetricsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getListBucketMetricsConfigurationsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go new file mode 100644 index 000000000..086d9d290 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go @@ -0,0 +1,167 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns a list of all +// buckets owned by the authenticated sender of the request. To use this operation, +// you must have the s3:ListAllMyBuckets permission. For information about Amazon +// S3 buckets, see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) +// . +func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) { + if params == nil { + params = &ListBucketsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListBuckets", params, optFns, c.addOperationListBucketsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListBucketsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListBucketsInput struct { + noSmithyDocumentSerde +} + +type ListBucketsOutput struct { + + // The list of buckets owned by the requester. + Buckets []types.Bucket + + // The owner of the buckets listed. + Owner *types.Owner + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpListBuckets{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBuckets{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListBuckets"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBuckets(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addListBucketsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListBuckets(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListBuckets", + } +} + +func addListBucketsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: nopGetBucketAccessor, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: false, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) 
+} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go new file mode 100644 index 000000000..3ebf78af1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go @@ -0,0 +1,291 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of all Amazon S3 directory buckets owned by the authenticated +// sender of the request. For more information about directory buckets, see +// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must +// make requests for this API operation to the Regional endpoint. These endpoints +// support path-style requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions You must have the +// s3express:ListAllMyDirectoryBuckets permission in an IAM identity-based policy +// instead of a bucket policy. Cross-account access to this API operation isn't +// supported. This operation can only be performed by the Amazon Web Services +// account that owns the resource. For more information about directory bucket +// policies and permissions, see Amazon Web Services Identity and Access +// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The +// HTTP Host header syntax is s3express-control.region.amazonaws.com . +func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) { + if params == nil { + params = &ListDirectoryBucketsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListDirectoryBuckets", params, optFns, c.addOperationListDirectoryBucketsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListDirectoryBucketsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListDirectoryBucketsInput struct { + + // ContinuationToken indicates to Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key. You can use this ContinuationToken for pagination of the list results. + ContinuationToken *string + + // Maximum number of buckets to be returned in response. When the number is more + // than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. 
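+	// Editorial note, not generated text: the paginator defined later in this
+	// file copies this value into ListDirectoryBucketsPaginatorOptions.Limit.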
+ MaxDirectoryBuckets *int32 + + noSmithyDocumentSerde +} + +func (in *ListDirectoryBucketsInput) bindEndpointParams(p *EndpointParameters) { + + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type ListDirectoryBucketsOutput struct { + + // The list of buckets owned by the requester. + Buckets []types.Bucket + + // If ContinuationToken was sent with the request, it is included in the response. + // You can use the returned ContinuationToken for pagination of the list response. + ContinuationToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpListDirectoryBuckets{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListDirectoryBuckets{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListDirectoryBuckets"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDirectoryBuckets(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addListDirectoryBucketsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +// ListDirectoryBucketsAPIClient is a client that implements the +// ListDirectoryBuckets operation. 
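+//
+// Editorial aside, not generated code: a minimal, hypothetical usage sketch of
+// the paginator machinery defined below, assuming a configured *Client named
+// client:
+//
+//	p := NewListDirectoryBucketsPaginator(client, &ListDirectoryBucketsInput{})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(context.TODO())
+//		if err != nil {
+//			return err // handle the error as appropriate
+//		}
+//		for _, b := range page.Buckets {
+//			fmt.Println(*b.Name)
+//		}
+//	}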
+type ListDirectoryBucketsAPIClient interface {
+	ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error)
+}
+
+var _ ListDirectoryBucketsAPIClient = (*Client)(nil)
+
+// ListDirectoryBucketsPaginatorOptions is the paginator options for
+// ListDirectoryBuckets
+type ListDirectoryBucketsPaginatorOptions struct {
+	// Maximum number of buckets to be returned in response. When the number is more
+	// than the count of buckets that are owned by an Amazon Web Services account,
+	// return all the buckets in response.
+	Limit int32
+
+	// Set to true if pagination should stop if the service returns a pagination token
+	// that matches the most recent token provided to the service.
+	StopOnDuplicateToken bool
+}
+
+// ListDirectoryBucketsPaginator is a paginator for ListDirectoryBuckets
+type ListDirectoryBucketsPaginator struct {
+	options   ListDirectoryBucketsPaginatorOptions
+	client    ListDirectoryBucketsAPIClient
+	params    *ListDirectoryBucketsInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewListDirectoryBucketsPaginator returns a new ListDirectoryBucketsPaginator
+func NewListDirectoryBucketsPaginator(client ListDirectoryBucketsAPIClient, params *ListDirectoryBucketsInput, optFns ...func(*ListDirectoryBucketsPaginatorOptions)) *ListDirectoryBucketsPaginator {
+	if params == nil {
+		params = &ListDirectoryBucketsInput{}
+	}
+
+	options := ListDirectoryBucketsPaginatorOptions{}
+	if params.MaxDirectoryBuckets != nil {
+		options.Limit = *params.MaxDirectoryBuckets
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &ListDirectoryBucketsPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.ContinuationToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListDirectoryBucketsPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListDirectoryBuckets page.
+func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.ContinuationToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxDirectoryBuckets = limit
+
+	result, err := p.client.ListDirectoryBuckets(ctx, &params, optFns...)
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.ContinuationToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListDirectoryBuckets(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListDirectoryBuckets", + } +} + +func addListDirectoryBucketsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: nopGetBucketAccessor, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go new file mode 100644 index 000000000..183773651 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go @@ -0,0 +1,412 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation lists in-progress multipart uploads in a bucket. An in-progress +// multipart upload is a multipart upload that has been initiated by the +// CreateMultipartUpload request, but has not yet been completed or aborted. +// Directory buckets - If multipart uploads in a directory bucket are in progress, +// you can't delete the bucket until all the in-progress multipart uploads are +// aborted or completed. The ListMultipartUploads operation returns a maximum of +// 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is +// also the default value. You can further limit the number of uploads in a +// response by specifying the max-uploads request parameter. If there are more +// than 1,000 multipart uploads that satisfy your ListMultipartUploads request, +// the response returns an IsTruncated element with the value of true , a +// NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining +// multipart uploads, you need to make subsequent ListMultipartUploads requests. +// In these requests, include two query parameters: key-marker and upload-id-marker +// . Set the value of key-marker to the NextKeyMarker value from the previous +// response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker +// value from the previous response. Directory buckets - The upload-id-marker +// element and the NextUploadIdMarker element aren't supported by directory +// buckets. To list the additional multipart uploads, you only need to set the +// value of key-marker to the NextKeyMarker value from the previous response. 
For +// more information about multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must +// make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Sorting of multipart uploads in response +// - General purpose bucket - In the ListMultipartUploads response, the multipart +// uploads are sorted based on two criteria: +// - Key-based sorting - Multipart uploads are initially sorted in ascending +// order based on their object keys. +// - Time-based sorting - For uploads that share the same object key, they are +// further sorted in ascending order based on the upload initiation time. Among +// uploads with the same key, the one that was initiated first will appear before +// the ones that were initiated later. +// - Directory bucket - In the ListMultipartUploads response, the multipart +// uploads aren't sorted lexicographically based on the object keys. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are +// related to ListMultipartUploads : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { + if params == nil { + params = &ListMultipartUploadsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListMultipartUploads", params, optFns, c.addOperationListMultipartUploadsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListMultipartUploadsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListMultipartUploadsInput struct { + + // The name of the bucket to which the multipart upload was initiated. Directory + // buckets - When you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Character you use to group keys. 
All keys that contain the same string between + // the prefix, if specified, and the first occurrence of the delimiter after the + // prefix are grouped under a single result element, CommonPrefixes . If you don't + // specify the prefix parameter, then the substring starts at the beginning of the + // key. The keys that are grouped under CommonPrefixes result element are not + // returned elsewhere in the response. Directory buckets - For directory buckets, / + // is the only supported delimiter. + Delimiter *string + + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in XML + // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the + // response. + EncodingType types.EncodingType + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Specifies the multipart upload after which listing should begin. + // - General purpose buckets - For general purpose buckets, key-marker is an + // object key. Together with upload-id-marker , this parameter specifies the + // multipart upload after which listing should begin. If upload-id-marker is not + // specified, only the keys lexicographically greater than the specified + // key-marker will be included in the list. If upload-id-marker is specified, any + // multipart uploads for a key equal to the key-marker might also be included, + // provided those multipart uploads have upload IDs lexicographically greater than + // the specified upload-id-marker . + // - Directory buckets - For directory buckets, key-marker is obfuscated and + // isn't a real object key. The upload-id-marker parameter isn't supported by + // directory buckets. To list the additional multipart uploads, you only need to + // set the value of key-marker to the NextKeyMarker value from the previous + // response. In the ListMultipartUploads response, the multipart uploads aren't + // sorted lexicographically based on the object keys. + KeyMarker *string + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the + // response body. 1,000 is the maximum number of uploads that can be returned in a + // response. + MaxUploads *int32 + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different grouping of + // keys. (You can think of using prefix to make groups in the same way that you'd + // use a folder in a file system.) Directory buckets - For directory buckets, only + // prefixes that end in a delimiter ( / ) are supported. + Prefix *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter is + // ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker . This functionality is not + // supported for directory buckets. + UploadIdMarker *string + + noSmithyDocumentSerde +} + +func (in *ListMultipartUploadsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Prefix = in.Prefix + +} + +type ListMultipartUploadsOutput struct { + + // The name of the bucket to which the multipart upload was initiated. Does not + // return the access point ARN or access point alias if used. + Bucket *string + + // If you specify a delimiter in the request, then the result returns each + // distinct key prefix containing the delimiter in a CommonPrefixes element. The + // distinct key prefixes are returned in the Prefix child element. Directory + // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are + // supported. + CommonPrefixes []types.CommonPrefix + + // Contains the delimiter you specified in the request. If you don't specify a + // delimiter in your request, this element is absent from the response. Directory + // buckets - For directory buckets, / is the only supported delimiter. + Delimiter *string + + // Encoding type used by Amazon S3 to encode object keys in the response. If you + // specify the encoding-type request parameter, Amazon S3 includes this element in + // the response, and returns encoded key name values in the following response + // elements: Delimiter , KeyMarker , Prefix , NextKeyMarker , Key . + EncodingType types.EncodingType + + // Indicates whether the returned list of multipart uploads is truncated. A value + // of true indicates that the list was truncated. The list can be truncated if the + // number of multipart uploads exceeds the limit allowed or specified by max + // uploads. + IsTruncated *bool + + // The key at or after which the listing began. + KeyMarker *string + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int32 + + // When a list is truncated, this element specifies the value that should be used + // for the key-marker request parameter in a subsequent request. + NextKeyMarker *string + + // When a list is truncated, this element specifies the value that should be used + // for the upload-id-marker request parameter in a subsequent request. This + // functionality is not supported for directory buckets. + NextUploadIdMarker *string + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // ( / ) are supported. + Prefix *string + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. 
+ RequestCharged types.RequestCharged + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter is + // ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker . This functionality is not + // supported for directory buckets. + UploadIdMarker *string + + // Container for elements related to a particular multipart upload. A response can + // contain zero or more Upload elements. + Uploads []types.MultipartUpload + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpListMultipartUploads{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListMultipartUploads{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListMultipartUploads"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMultipartUploads(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addListMultipartUploadsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { 
+		return err
+	}
+	return nil
+}
+
+func (v *ListMultipartUploadsInput) bucket() (string, bool) {
+	if v.Bucket == nil {
+		return "", false
+	}
+	return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opListMultipartUploads(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "ListMultipartUploads",
+	}
+}
+
+// getListMultipartUploadsBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getListMultipartUploadsBucketMember(input interface{}) (*string, bool) {
+	in := input.(*ListMultipartUploadsInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addListMultipartUploadsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getListMultipartUploadsBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go
new file mode 100644
index 000000000..bcb90eb2d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go
@@ -0,0 +1,315 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported by directory buckets. Returns metadata about
+// all versions of the objects in a bucket. You can also use request parameters as
+// selection criteria to return metadata about a subset of all the object versions.
+// To use this operation, you must have permission to perform the
+// s3:ListBucketVersions action. Be aware of the name difference. A 200 OK
+// response can contain valid or invalid XML. Make sure to design your application
+// to parse the contents of the response and handle it appropriately. To use this
+// operation, you must have READ access to the bucket.
The following operations are +// related to ListObjectVersions : +// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { + if params == nil { + params = &ListObjectVersionsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListObjectVersions", params, optFns, c.addOperationListObjectVersionsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListObjectVersionsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListObjectVersionsInput struct { + + // The bucket name that contains the objects. + // + // This member is required. + Bucket *string + + // A delimiter is a character that you specify to group keys. All keys that + // contain the same string between the prefix and the first occurrence of the + // delimiter are grouped under a single result element in CommonPrefixes . These + // groups are counted as one result against the max-keys limitation. These keys + // are not returned elsewhere in the response. + Delimiter *string + + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in XML + // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the + // response. + EncodingType types.EncodingType + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string + + // Sets the maximum number of keys returned in the response. By default, the + // action returns up to 1,000 key names. The response might contain fewer keys but + // will never contain more. If additional keys satisfy the search criteria, but + // were not returned because max-keys was exceeded, the response contains true . To + // return the additional keys, see key-marker and version-id-marker . + MaxKeys *int32 + + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. + OptionalObjectAttributes []types.OptionalObjectAttributes + + // Use this parameter to select only those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different groupings of + // keys. (You can think of using prefix to make groups in the same way that you'd + // use a folder in a file system.) You can use prefix with delimiter to roll up + // numerous objects into a single result under CommonPrefixes . + Prefix *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Specifies the object version you want to start listing from. + VersionIdMarker *string + + noSmithyDocumentSerde +} + +func (in *ListObjectVersionsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Prefix = in.Prefix + +} + +type ListObjectVersionsOutput struct { + + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + CommonPrefixes []types.CommonPrefix + + // Container for an object that is a delete marker. + DeleteMarkers []types.DeleteMarkerEntry + + // The delimiter grouping the included keys. A delimiter is a character that you + // specify to group keys. All keys that contain the same string between the prefix + // and the first occurrence of the delimiter are grouped under a single result + // element in CommonPrefixes . These groups are counted as one result against the + // max-keys limitation. These keys are not returned elsewhere in the response. + Delimiter *string + + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter . + EncodingType types.EncodingType + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. If your results were truncated, you can make a + // follow-up paginated request by using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the rest of + // the results. + IsTruncated *bool + + // Marks the last key returned in a truncated response. + KeyMarker *string + + // Specifies the maximum number of objects to return. + MaxKeys *int32 + + // The bucket name. + Name *string + + // When the number of responses exceeds the value of MaxKeys , NextKeyMarker + // specifies the first key not returned that satisfies the search criteria. Use + // this value for the key-marker request parameter in a subsequent request. + NextKeyMarker *string + + // When the number of responses exceeds the value of MaxKeys , NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. Use this value for the version-id-marker request parameter in a + // subsequent request. + NextVersionIdMarker *string + + // Selects objects that start with the value supplied by this parameter. + Prefix *string + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Marks the last version of the key returned in a truncated response. + VersionIdMarker *string + + // Container for version information. + Versions []types.ObjectVersion + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectVersions{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectVersions{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjectVersions"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpListObjectVersionsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectVersions(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addListObjectVersionsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *ListObjectVersionsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opListObjectVersions(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListObjectVersions", + } +} + +// getListObjectVersionsBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getListObjectVersionsBucketMember(input interface{}) (*string, bool) { + in := input.(*ListObjectVersionsInput) + if in.Bucket == nil { +
return nil, false + } + return in.Bucket, true +} +func addListObjectVersionsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getListObjectVersionsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go new file mode 100644 index 000000000..9a3bf3e02 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go @@ -0,0 +1,319 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Returns some or all (up +// to 1,000) of the objects in a bucket. You can use the request parameters as +// selection criteria to return a subset of the objects in a bucket. A 200 OK +// response can contain valid or invalid XML. Be sure to design your application to +// parse the contents of the response and handle it appropriately. This action has +// been revised. We recommend that you use the newer version, ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// , when developing applications. For backward compatibility, Amazon S3 continues +// to support ListObjects . The following operations are related to ListObjects : +// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) { + if params == nil { + params = &ListObjectsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListObjects", params, optFns, c.addOperationListObjectsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListObjectsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListObjectsInput struct { + + // The name of the bucket containing the objects. Directory buckets - When you use + // this operation with a directory bucket, you must use virtual-hosted-style + // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . + // Path-style requests are not supported. Directory bucket names must be unique in + // the chosen Availability Zone. 
Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // A delimiter is a character that you use to group keys. + Delimiter *string + + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in XML + // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the + // response. + EncodingType types.EncodingType + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. Marker can be any key in the bucket. + Marker *string + + // Sets the maximum number of keys returned in the response. By default, the + // action returns up to 1,000 key names. The response might contain fewer keys but + // will never contain more. + MaxKeys *int32 + + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. + OptionalObjectAttributes []types.OptionalObjectAttributes + + // Limits the response to keys that begin with the specified prefix. + Prefix *string + + // Confirms that the requester knows that she or he will be charged for the list + // objects request. Bucket owners need not specify this parameter in their + // requests. 
+ RequestPayer types.RequestPayer + + noSmithyDocumentSerde +} + +func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Prefix = in.Prefix + +} + +type ListObjectsOutput struct { + + // All of the keys (up to 1,000) rolled up in a common prefix count as a single + // return when calculating the number of returns. A response can contain + // CommonPrefixes only if you specify a delimiter. CommonPrefixes contains all (if + // there are any) keys between Prefix and the next occurrence of the string + // specified by the delimiter. CommonPrefixes lists keys that act like + // subdirectories in the directory specified by Prefix . For example, if the prefix + // is notes/ and the delimiter is a slash ( / ), as in notes/summer/july , the + // common prefix is notes/summer/ . All of the keys that roll up into a common + // prefix count as a single return when calculating the number of returns. + CommonPrefixes []types.CommonPrefix + + // Metadata about each object returned. + Contents []types.Object + + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element in the + // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in + // the response. Each rolled-up result counts as only one return against the + // MaxKeys value. + Delimiter *string + + // Encoding type used by Amazon S3 to encode object keys in the response. If using + // url , non-ASCII characters used in an object's key name will be URL encoded. For + // example, the object test_file(3).png will appear as test_file%283%29.png. + EncodingType types.EncodingType + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. + IsTruncated *bool + + // Indicates where in the bucket listing begins. Marker is included in the + // response if it was sent with the request. + Marker *string + + // The maximum number of keys returned in the response body. + MaxKeys *int32 + + // The bucket name. + Name *string + + // When the response is truncated (the IsTruncated element value in the response + // is true ), you can use the key name in this field as the marker parameter in + // the subsequent request to get the next set of objects. Amazon S3 lists objects + // in alphabetical order. This element is returned only if you have the delimiter + // request parameter specified. If the response does not include the NextMarker + // element and it is truncated, you can use the value of the last Key element in + // the response as the marker parameter in the subsequent request to get the next + // set of object keys. + NextMarker *string + + // Keys that begin with the indicated prefix. + Prefix *string + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. 
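// Editor's sketch (not part of the vendored file): ListObjects has no generated paginator, so callers page manually with Marker/NextMarker as the field docs above describe. A hedged example; the bucket name is a placeholder and imports of github.com/aws/aws-sdk-go-v2/aws and this s3 package are assumed.
//
//	func listAllKeys(ctx context.Context, client *s3.Client, bucket string) ([]string, error) {
//		var keys []string
//		var marker *string
//		for {
//			out, err := client.ListObjects(ctx, &s3.ListObjectsInput{
//				Bucket: aws.String(bucket),
//				Marker: marker,
//			})
//			if err != nil {
//				return nil, err
//			}
//			for _, obj := range out.Contents {
//				keys = append(keys, aws.ToString(obj.Key))
//			}
//			if out.IsTruncated == nil || !*out.IsTruncated || len(out.Contents) == 0 {
//				return keys, nil
//			}
//			// Per the NextMarker docs above: NextMarker is only returned when a
//			// delimiter is set; otherwise the last returned key is the marker.
//			if out.NextMarker != nil {
//				marker = out.NextMarker
//			} else {
//				marker = out.Contents[len(out.Contents)-1].Key
//			}
//		}
//	}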
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpListObjects{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjects{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjects"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpListObjectsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjects(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addListObjectsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *ListObjectsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opListObjects(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListObjects", + } +} + +// getListObjectsBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getListObjectsBucketMember(input interface{}) (*string, bool) { + in := input.(*ListObjectsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addListObjectsUpdateEndpoint(stack
*middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getListObjectsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go new file mode 100644 index 000000000..ee09d3cbd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go @@ -0,0 +1,487 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns some or all (up to 1,000) of the objects in a bucket with each request. +// You can use the request parameters as selection criteria to return a subset of +// the objects in a bucket. A 200 OK response can contain valid or invalid XML. +// Make sure to design your application to parse the contents of the response and +// handle it appropriately. For more information about listing objects, see +// Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) +// in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// . Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - To use this operation, you must have +// READ access to the bucket. You must have permission to perform the +// s3:ListBucket action. The bucket owner has this permission by default and can +// grant this permission to others. For more information about permissions, see +// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. 
Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Sorting order of returned objects +// - General purpose bucket - For general purpose buckets, ListObjectsV2 returns +// objects in lexicographical order based on their key names. +// - Directory bucket - For directory buckets, ListObjectsV2 does not return +// objects in lexicographical order. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . This section describes the +// latest revision of this action. We recommend that you use this revised API +// operation for application development. For backward compatibility, Amazon S3 +// continues to support the prior version of this API operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// . The following operations are related to ListObjectsV2 : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) { + if params == nil { + params = &ListObjectsV2Input{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListObjectsV2", params, optFns, c.addOperationListObjectsV2Middlewares) + if err != nil { + return nil, err + } + + out := result.(*ListObjectsV2Output) + out.ResultMetadata = metadata + return out, nil +} + +type ListObjectsV2Input struct { + + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. 
For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // ContinuationToken indicates to Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key. You can use this ContinuationToken for pagination of the list results. + ContinuationToken *string + + // A delimiter is a character that you use to group keys. + // - Directory buckets - For directory buckets, / is the only supported + // delimiter. + // - Directory buckets - When you query ListObjectsV2 with a delimiter during + // in-progress multipart uploads, the CommonPrefixes response parameter contains + // the prefixes that are associated with the in-progress multipart uploads. For + // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) + // in the Amazon S3 User Guide. + Delimiter *string + + // Encoding type used by Amazon S3 to encode object keys in the response. If using + // url , non-ASCII characters used in an object's key name will be URL encoded. For + // example, the object test_file(3).png will appear as test_file%283%29.png. + EncodingType types.EncodingType + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // The owner field is not present in ListObjectsV2 by default. If you want to + // return the owner field with each key in the result, then set the FetchOwner + // field to true . Directory buckets - For directory buckets, the bucket owner is + // returned as the object owner for all objects. + FetchOwner *bool + + // Sets the maximum number of keys returned in the response. By default, the + // action returns up to 1,000 key names. The response might contain fewer keys but + // will never contain more. + MaxKeys *int32 + + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. This functionality is not supported + // for directory buckets. + OptionalObjectAttributes []types.OptionalObjectAttributes + + // Limits the response to keys that begin with the specified prefix. Directory + // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are + // supported. + Prefix *string + + // Confirms that the requester knows that she or he will be charged for the list + // objects request in V2 style. Bucket owners need not specify this parameter in + // their requests. 
This functionality is not supported for directory buckets. + RequestPayer types.RequestPayer + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket. This + // functionality is not supported for directory buckets. + StartAfter *string + + noSmithyDocumentSerde +} + +func (in *ListObjectsV2Input) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Prefix = in.Prefix + +} + +type ListObjectsV2Output struct { + + // All of the keys (up to 1,000) that share the same prefix are grouped together. + // When counting the total numbers of returns by this API operation, this group of + // keys is considered as one item. A response can contain CommonPrefixes only if + // you specify a delimiter. CommonPrefixes contains all (if there are any) keys + // between Prefix and the next occurrence of the string specified by a delimiter. + // CommonPrefixes lists keys that act like subdirectories in the directory + // specified by Prefix . For example, if the prefix is notes/ and the delimiter is + // a slash ( / ) as in notes/summer/july , the common prefix is notes/summer/ . All + // of the keys that roll up into a common prefix count as a single return when + // calculating the number of returns. + // - Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. + // - Directory buckets - When you query ListObjectsV2 with a delimiter during + // in-progress multipart uploads, the CommonPrefixes response parameter contains + // the prefixes that are associated with the in-progress multipart uploads. For + // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) + // in the Amazon S3 User Guide. + CommonPrefixes []types.CommonPrefix + + // Metadata about each object returned. + Contents []types.Object + + // If ContinuationToken was sent with the request, it is included in the response. + // You can use the returned ContinuationToken for pagination of the list response. + // You can use this ContinuationToken for pagination of the list results. + ContinuationToken *string + + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element in the + // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in + // the response. Each rolled-up result counts as only one return against the + // MaxKeys value. Directory buckets - For directory buckets, / is the only + // supported delimiter. + Delimiter *string + + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: Delimiter, Prefix, Key, and StartAfter . + EncodingType types.EncodingType + + // Set to false if all of the results were returned. Set to true if more keys are + // available to return. If the number of results exceeds that specified by MaxKeys + // , all of the results might not be returned. + IsTruncated *bool + + // KeyCount is the number of keys returned with this request. KeyCount will always + // be less than or equal to the MaxKeys field. For example, if you ask for 50 + // keys, your result will include 50 keys or fewer. 
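// Editor's sketch (not part of the vendored file): a hedged example of paging with the ListObjectsV2Paginator defined further down in this file. The bucket and prefix are placeholders; ctx and a client from s3.NewFromConfig are assumed to be in scope inside a function returning error, with fmt and aws imported.
//
//	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
//		Bucket:    aws.String("example-bucket"), // hypothetical bucket
//		Prefix:    aws.String("notes/"),
//		Delimiter: aws.String("/"),
//	})
//	for p.HasMorePages() {
//		page, err := p.NextPage(ctx)
//		if err != nil {
//			return err
//		}
//		for _, obj := range page.Contents {
//			fmt.Println("key:", aws.ToString(obj.Key))
//		}
//		for _, cp := range page.CommonPrefixes {
//			fmt.Println("common prefix:", aws.ToString(cp.Prefix))
//		}
//	}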
+ KeyCount *int32 + + // Sets the maximum number of keys returned in the response. By default, the + // action returns up to 1,000 key names. The response might contain fewer keys but + // will never contain more. + MaxKeys *int32 + + // The bucket name. + Name *string + + // NextContinuationToken is sent when isTruncated is true, which means there are + // more keys in the bucket that can be listed. The next list requests to Amazon S3 + // can be continued with this NextContinuationToken . NextContinuationToken is + // obfuscated and is not a real key + NextContinuationToken *string + + // Keys that begin with the indicated prefix. Directory buckets - For directory + // buckets, only prefixes that end in a delimiter ( / ) are supported. + Prefix *string + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // If StartAfter was sent with the request, it is included in the response. This + // functionality is not supported for directory buckets. + StartAfter *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectsV2{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectsV2{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjectsV2"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpListObjectsV2ValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectsV2(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addListObjectsV2UpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = 
v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *ListObjectsV2Input) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. +type ListObjectsV2APIClient interface { + ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) +} + +var _ ListObjectsV2APIClient = (*Client)(nil) + +// ListObjectsV2PaginatorOptions is the paginator options for ListObjectsV2 +type ListObjectsV2PaginatorOptions struct { + // Sets the maximum number of keys returned in the response. By default, the + // action returns up to 1,000 key names. The response might contain fewer keys but + // will never contain more. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListObjectsV2Paginator is a paginator for ListObjectsV2 +type ListObjectsV2Paginator struct { + options ListObjectsV2PaginatorOptions + client ListObjectsV2APIClient + params *ListObjectsV2Input + nextToken *string + firstPage bool +} + +// NewListObjectsV2Paginator returns a new ListObjectsV2Paginator +func NewListObjectsV2Paginator(client ListObjectsV2APIClient, params *ListObjectsV2Input, optFns ...func(*ListObjectsV2PaginatorOptions)) *ListObjectsV2Paginator { + if params == nil { + params = &ListObjectsV2Input{} + } + + options := ListObjectsV2PaginatorOptions{} + if params.MaxKeys != nil { + options.Limit = *params.MaxKeys + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListObjectsV2Paginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.ContinuationToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListObjectsV2Paginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListObjectsV2 page. +func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectsV2Output, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.ContinuationToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxKeys = limit + + result, err := p.client.ListObjectsV2(ctx, &params, optFns...)
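	// (Editor's note, not generated code) One service call is made per page;
	// once the error check below passes, NextContinuationToken from a truncated
	// result seeds the next request, and StopOnDuplicateToken clears it if the
	// service echoes the same token, preventing an infinite loop.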
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = nil + if result.IsTruncated != nil && *result.IsTruncated { + p.nextToken = result.NextContinuationToken + } + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListObjectsV2(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListObjectsV2", + } +} + +// getListObjectsV2BucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getListObjectsV2BucketMember(input interface{}) (*string, bool) { + in := input.(*ListObjectsV2Input) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addListObjectsV2UpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getListObjectsV2BucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go new file mode 100644 index 000000000..3f3946d93 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go @@ -0,0 +1,478 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Lists the parts that have been uploaded for a specific multipart upload. To use +// this operation, you must provide the upload ID in the request. You obtain this +// uploadID by sending the initiate multipart upload request through +// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// . The ListParts request returns a maximum of 1,000 uploaded parts. The limit of +// 1,000 parts is also the default value. You can restrict the number of parts in a +// response by specifying the max-parts request parameter. If your multipart +// upload consists of more than 1,000 parts, the response returns an IsTruncated +// field with the value of true , and a NextPartNumberMarker element. To list +// remaining uploaded parts, in subsequent ListParts requests, include the +// part-number-marker query string parameter and set its value to the +// NextPartNumberMarker field value from the previous response. For more +// information on multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must +// make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. If the upload was created using server-side +// encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer +// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must +// have permission to the kms:Decrypt action for the ListParts request to +// succeed. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to ListParts : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) { + if params == nil { + params = &ListPartsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListParts", params, optFns, c.addOperationListPartsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListPartsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListPartsInput struct { + + // The name of the bucket to which the parts are being uploaded. 
Directory buckets + // - When you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the multipart upload was initiated. + // + // This member is required. + Key *string + + // Upload ID identifying the multipart upload whose parts are being listed. + // + // This member is required. + UploadId *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Sets the maximum number of parts to return. + MaxParts *int32 + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. 
+ RequestPayer types.RequestPayer + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + SSECustomerAlgorithm *string + + // The server-side encryption (SSE) customer managed key. This parameter is needed + // only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + SSECustomerKey *string + + // The MD5 server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + SSECustomerKeyMD5 *string + + noSmithyDocumentSerde +} + +func (in *ListPartsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + +type ListPartsOutput struct { + + // If the bucket has a lifecycle rule configured with an action to abort + // incomplete multipart uploads and the prefix in the lifecycle rule matches the + // object name in the request, then the response includes this header indicating + // when the initiated multipart upload will become eligible for abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // . The response will also include the x-amz-abort-rule-id header that will + // provide the ID of the lifecycle configuration rule that defines this action. + // This functionality is not supported for directory buckets. + AbortDate *time.Time + + // This header is returned along with the x-amz-abort-date header. It identifies + // applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. This functionality is not supported for directory + // buckets. + AbortRuleId *string + + // The name of the bucket to which the multipart upload was initiated. Does not + // return the access point ARN or access point alias if used. + Bucket *string + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Container element that identifies who initiated the multipart upload. If the + // initiator is an Amazon Web Services account, this element provides the same + // information as the Owner element. If the initiator is an IAM User, this element + // provides the user ARN and display name. + Initiator *types.Initiator + + // Indicates whether the returned list of parts is truncated. A true value + // indicates that the list was truncated. A list can be truncated if the number of + // parts exceeds the limit returned in the MaxParts element. + IsTruncated *bool + + // Object key for which the multipart upload was initiated. 
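// Editor's sketch (not part of the vendored file): a hedged example of listing uploaded parts with the ListPartsPaginator defined later in this file. The bucket and key are placeholders, uploadID is assumed to come from a prior CreateMultipartUpload call, and ctx/client/fmt/aws are assumed in scope inside a function returning error.
//
//	p := s3.NewListPartsPaginator(client, &s3.ListPartsInput{
//		Bucket:   aws.String("example-bucket"), // hypothetical bucket
//		Key:      aws.String("big-object.bin"), // hypothetical key
//		UploadId: aws.String(uploadID),
//	})
//	for p.HasMorePages() {
//		page, err := p.NextPage(ctx)
//		if err != nil {
//			return err
//		}
//		for _, part := range page.Parts {
//			fmt.Printf("part %d: %d bytes\n",
//				aws.ToInt32(part.PartNumber), aws.ToInt64(part.Size))
//		}
//	}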
+ Key *string + + // Maximum number of parts that were allowed in the response. + MaxParts *int32 + + // When a list is truncated, this element specifies the last part in the list, as + // well as the value to use for the part-number-marker request parameter in a + // subsequent request. + NextPartNumberMarker *string + + // Container element that identifies the object owner, after the object is + // created. If multipart upload is initiated by an IAM user, this element provides + // the parent account ID and display name. Directory buckets - The bucket owner is + // returned as the object owner for all the parts. + Owner *types.Owner + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *string + + // Container for elements related to a particular part. A response can contain + // zero or more Part elements. + Parts []types.Part + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // The class of storage used to store the uploaded object. Directory buckets - + // Only the S3 Express One Zone storage class is supported by directory buckets to + // store objects. + StorageClass types.StorageClass + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpListParts{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListParts{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListParts"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpListPartsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListParts(options.Region), middleware.Before); err != nil { + return err + } + if err = 
addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addListPartsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *ListPartsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// ListPartsAPIClient is a client that implements the ListParts operation. +type ListPartsAPIClient interface { + ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error) +} + +var _ ListPartsAPIClient = (*Client)(nil) + +// ListPartsPaginatorOptions is the paginator options for ListParts +type ListPartsPaginatorOptions struct { + // Sets the maximum number of parts to return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListPartsPaginator is a paginator for ListParts +type ListPartsPaginator struct { + options ListPartsPaginatorOptions + client ListPartsAPIClient + params *ListPartsInput + nextToken *string + firstPage bool +} + +// NewListPartsPaginator returns a new ListPartsPaginator +func NewListPartsPaginator(client ListPartsAPIClient, params *ListPartsInput, optFns ...func(*ListPartsPaginatorOptions)) *ListPartsPaginator { + if params == nil { + params = &ListPartsInput{} + } + + options := ListPartsPaginatorOptions{} + if params.MaxParts != nil { + options.Limit = *params.MaxParts + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListPartsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.PartNumberMarker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListPartsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListParts page. +func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListPartsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.PartNumberMarker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxParts = limit + + result, err := p.client.ListParts(ctx, &params, optFns...)
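	// (Editor's note, not generated code) Mirrors the other paginators in this
	// package: after the error check below, a truncated response's
	// NextPartNumberMarker becomes the PartNumberMarker of the next request.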
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = nil + if result.IsTruncated != nil && *result.IsTruncated { + p.nextToken = result.NextPartNumberMarker + } + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListParts(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListParts", + } +} + +// getListPartsBucketMember returns a pointer to string denoting a provided bucket +// member valueand a boolean indicating if the input has a modeled bucket name, +func getListPartsBucketMember(input interface{}) (*string, bool) { + in := input.(*ListPartsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addListPartsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getListPartsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go new file mode 100644 index 000000000..c15d55f1f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go @@ -0,0 +1,255 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Sets the accelerate +// configuration of an existing bucket. Amazon S3 Transfer Acceleration is a +// bucket-level feature that enables you to perform faster data transfers to Amazon +// S3. To use this operation, you must have permission to perform the +// s3:PutAccelerateConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . The Transfer Acceleration state of a bucket can be set to one of the following +// two values: +// - Enabled – Enables accelerated data transfers to the bucket. 
+// - Suspended – Disables accelerated data transfers to the bucket. +// +// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// action returns the transfer acceleration state of a bucket. After setting the +// Transfer Acceleration state of a bucket to Enabled, it might take up to thirty +// minutes before the data transfer rates to the bucket increase. The name of the +// bucket used for Transfer Acceleration must be DNS-compliant and must not contain +// periods ("."). For more information about transfer acceleration, see Transfer +// Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// . The following operations are related to PutBucketAccelerateConfiguration : +// - GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) { + if params == nil { + params = &PutBucketAccelerateConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketAccelerateConfiguration", params, optFns, c.addOperationPutBucketAccelerateConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketAccelerateConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketAccelerateConfigurationInput struct { + + // Container for setting the transfer acceleration state. + // + // This member is required. + AccelerateConfiguration *types.AccelerateConfiguration + + // The name of the bucket for which the accelerate configuration is set. + // + // This member is required. + Bucket *string + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *PutBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketAccelerateConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAccelerateConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAccelerateConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAccelerateConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *PutBucketAccelerateConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketAccelerateConfiguration", + } +} + +// getPutBucketAccelerateConfigurationRequestAlgorithmMember 
gets the request +// checksum algorithm value provided as input. +func getPutBucketAccelerateConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketAccelerateConfigurationInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketAccelerateConfigurationRequestAlgorithmMember, + RequireChecksum: false, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketAccelerateConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getPutBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketAccelerateConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketAccelerateConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go new file mode 100644 index 000000000..f88bb4af2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go @@ -0,0 +1,343 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Sets the permissions on +// an existing bucket using access control lists (ACL). For more information, see +// Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// . To set the ACL of a bucket, you must have the WRITE_ACP permission. You can +// use one of the following two ways to set a bucket's permissions: +// - Specify the ACL in the request body +// - Specify permissions using request headers +// +// You cannot specify access permission using both the body and the request +// headers. Depending on your application needs, you may choose to set the ACL on a +// bucket using either the request body or the headers. 
For example, if you have an +// existing application that updates a bucket ACL using the request body, then you +// can continue to use that approach. If your bucket uses the bucket owner enforced +// setting for S3 Object Ownership, ACLs are disabled and no longer affect +// permissions. You must use policies to grant access to your bucket and the +// objects in it. Requests to set ACLs or update ACLs fail and return the +// AccessControlListNotSupported error code. Requests to read ACLs are still +// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. Permissions You can set access permissions by using +// one of the following methods: +// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a +// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined +// set of grantees and permissions. Specify the canned ACL name as the value of +// x-amz-acl . If you use this header, you cannot use other access +// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) +// . +// - Specify access permissions explicitly with the x-amz-grant-read , +// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control +// headers. When using these headers, you specify explicit access permissions and +// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the +// permission. If you use these ACL-specific headers, you cannot use the +// x-amz-acl header to set a canned ACL. These parameters map to the set of +// permissions that Amazon S3 supports in an ACL. For more information, see +// Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// . You specify each grantee as a type=value pair, where the type is one of the +// following: +// - id – if the value specified is the canonical user ID of an Amazon Web +// Services account +// - uri – if you are granting permissions to a predefined group +// - emailAddress – if the value specified is the email address of an Amazon Web +// Services account Using email addresses to specify a grantee is only supported in +// the following Amazon Web Services Regions: +// - US East (N. Virginia) +// - US West (N. California) +// - US West (Oregon) +// - Asia Pacific (Singapore) +// - Asia Pacific (Sydney) +// - Asia Pacific (Tokyo) +// - Europe (Ireland) +// - South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the Amazon Web Services General Reference. For example, the following +// x-amz-grant-write header grants create, overwrite, and delete objects +// permission to LogDelivery group predefined by Amazon S3 and two Amazon Web +// Services accounts identified by their email addresses. x-amz-grant-write: +// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", +// id="555566667777" +// +// You can use either a canned ACL or specify access permissions explicitly. You +// cannot do both. 
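//
// Usage sketch (illustrative, not part of the generated documentation): the
// canned-ACL path typically looks like the following; the bucket name is a
// placeholder and the client is assumed to come from s3.NewFromConfig.
//
//	_, err := client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
//	    Bucket: aws.String("example-bucket"), // placeholder
//	    ACL:    types.BucketCannedACLPrivate, // canned ACL; omit when using grant headers
//	})
//
// Granting permissions explicitly would instead set GrantRead, GrantWriteACP,
// and related fields while leaving ACL unset, since the two approaches are
// mutually exclusive.
//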
Grantee Values You can specify the person (grantee) to whom +// you're assigning access rights (using request elements) in the following ways: +// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and +// ignored in the request +// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// - By Email address: <>Grantees@email.com<>& The grantee is resolved to the +// CanonicalUser and, in a response to a GET Object acl request, appears as the +// CanonicalUser. Using email addresses to specify a grantee is only supported in +// the following Amazon Web Services Regions: +// - US East (N. Virginia) +// - US West (N. California) +// - US West (Oregon) +// - Asia Pacific (Singapore) +// - Asia Pacific (Sydney) +// - Asia Pacific (Tokyo) +// - Europe (Ireland) +// - South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the Amazon Web Services General Reference. +// +// The following operations are related to PutBucketAcl : +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) { + if params == nil { + params = &PutBucketAclInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketAcl", params, optFns, c.addOperationPutBucketAclMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketAclOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketAclInput struct { + + // The bucket to which to apply the ACL. + // + // This member is required. + Bucket *string + + // The canned ACL to apply to the bucket. + ACL types.BucketCannedACL + + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *types.AccessControlPolicy + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. This header must be used as + // a message integrity check to verify that the request body was not corrupted in + // transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt) + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
+ ExpectedBucketOwner *string + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string + + // Allows grantee to list the objects in the bucket. + GrantRead *string + + // Allows grantee to read the bucket ACL. + GrantReadACP *string + + // Allows grantee to create new objects in the bucket. For the bucket and object + // owners of existing objects, also allows deletions and overwrites of those + // objects. + GrantWrite *string + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string + + noSmithyDocumentSerde +} + +func (in *PutBucketAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketAclOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAcl{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAcl{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAcl"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketAclValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAcl(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketAclInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketAclUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if 
err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutBucketAclInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketAcl", + } +} + +// getPutBucketAclRequestAlgorithmMember gets the request checksum algorithm value +// provided as input. +func getPutBucketAclRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketAclInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketAclRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketAclBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getPutBucketAclBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketAclInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketAclBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go new file mode 100644 index 000000000..0604fb930 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go @@ -0,0 +1,241 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Sets an analytics +// configuration for the bucket (specified by the analytics configuration ID). You +// can have up to 1,000 analytics configurations per bucket. You can choose to have +// storage class analysis export analysis reports sent to a comma-separated values +// (CSV) flat file. 
See the DataExport request element. Reports are updated daily +// and are based on the object filters that you configure. When selecting data +// export, you specify a destination bucket and an optional destination prefix +// where the file is written. You can export the data to a destination bucket in a +// different account. However, the destination bucket must be in the same Region as +// the bucket that you are making the PUT analytics configuration to. For more +// information, see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// . You must create a bucket policy on the destination bucket where the exported +// file is written to grant permissions to Amazon S3 to write objects to the +// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory +// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) +// . To use this operation, you must have permissions to perform the +// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . PutBucketAnalyticsConfiguration has the following special errors: +// - HTTP Error: HTTP 400 Bad Request +// - Code: InvalidArgument +// - Cause: Invalid argument. +// - HTTP Error: HTTP 400 Bad Request +// - Code: TooManyConfigurations +// - Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// - HTTP Error: HTTP 403 Forbidden +// - Code: AccessDenied +// - Cause: You are not the owner of the specified bucket, or you do not have +// the s3:PutAnalyticsConfiguration bucket permission to set the configuration on +// the bucket. +// +// The following operations are related to PutBucketAnalyticsConfiguration : +// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) { + if params == nil { + params = &PutBucketAnalyticsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketAnalyticsConfiguration", params, optFns, c.addOperationPutBucketAnalyticsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketAnalyticsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketAnalyticsConfigurationInput struct { + + // The configuration and any analyses for the analytics filter. + // + // This member is required. 
+ AnalyticsConfiguration *types.AnalyticsConfiguration + + // The name of the bucket to which an analytics configuration is stored. + // + // This member is required. + Bucket *string + + // The ID that identifies the analytics configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *PutBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketAnalyticsConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAnalyticsConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != 
nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *PutBucketAnalyticsConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketAnalyticsConfiguration", + } +} + +// getPutBucketAnalyticsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getPutBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketAnalyticsConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketAnalyticsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go new file mode 100644 index 000000000..3e6604ef6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go @@ -0,0 +1,272 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Sets the cors +// configuration for your bucket. If the configuration exists, Amazon S3 replaces +// it. To use this operation, you must be allowed to perform the s3:PutBucketCORS +// action. By default, the bucket owner has this permission and can grant it to +// others. You set this configuration on a bucket so that the bucket can service +// cross-origin requests. For example, you might want to enable a request whose +// origin is http://www.example.com to access your Amazon S3 bucket at +// my.example.bucket.com by using the browser's XMLHttpRequest capability. To +// enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which you +// configure rules that identify origins and the HTTP methods that can be executed +// on your bucket. The document is limited to 64 KB in size. 
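//
// Usage sketch (illustrative, not part of the generated documentation): a
// minimal CORS configuration matching the XMLHttpRequest example above might
// look like this; the bucket name and origin are placeholders and the client
// is assumed to come from s3.NewFromConfig.
//
//	_, err := client.PutBucketCors(ctx, &s3.PutBucketCorsInput{
//	    Bucket: aws.String("my.example.bucket.com"), // placeholder
//	    CORSConfiguration: &types.CORSConfiguration{
//	        CORSRules: []types.CORSRule{{
//	            AllowedOrigins: []string{"http://www.example.com"},
//	            AllowedMethods: []string{"GET", "PUT"},
//	            AllowedHeaders: []string{"*"},
//	        }},
//	    },
//	})
//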
When Amazon S3 +// receives a cross-origin request (or a pre-flight OPTIONS request) against a +// bucket, it evaluates the cors configuration on the bucket and uses the first +// CORSRule rule that matches the incoming browser request to enable a cross-origin +// request. For a rule to match, the following conditions must be met: +// - The request's Origin header must match AllowedOrigin elements. +// - The request method (for example, GET, PUT, HEAD, and so on) or the +// Access-Control-Request-Method header in case of a pre-flight OPTIONS request +// must be one of the AllowedMethod elements. +// - Every header specified in the Access-Control-Request-Headers request header +// of a pre-flight request must match an AllowedHeader element. +// +// For more information about CORS, go to Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon S3 User Guide. The following operations are related to +// PutBucketCors : +// - GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) +// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) { + if params == nil { + params = &PutBucketCorsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketCors", params, optFns, c.addOperationPutBucketCorsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketCorsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketCorsInput struct { + + // Specifies the bucket impacted by the cors configuration. + // + // This member is required. + Bucket *string + + // Describes the cross-origin access configuration for objects in an Amazon S3 + // bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) + // in the Amazon S3 User Guide. + // + // This member is required. + CORSConfiguration *types.CORSConfiguration + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. This header must be used as + // a message integrity check to verify that the request body was not corrupted in + // transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt) + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *PutBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketCorsOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketCors{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketCors{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketCors"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketCorsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketCors(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketCorsInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketCorsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutBucketCorsInput) bucket() (string, bool) { + if v.Bucket == nil { + 
return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketCors", + } +} + +// getPutBucketCorsRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. +func getPutBucketCorsRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketCorsInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketCorsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketCorsRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketCorsBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getPutBucketCorsBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketCorsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketCorsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go new file mode 100644 index 000000000..dfc71dc5c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go @@ -0,0 +1,271 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. This action uses the +// encryption subresource to configure default encryption and Amazon S3 Bucket Keys +// for an existing bucket. By default, all buckets have a default encryption +// configuration that uses server-side encryption with Amazon S3 managed keys +// (SSE-S3). You can optionally configure default encryption for a bucket by using +// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or +// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). 
+// If you specify default encryption by using SSE-KMS, you can also configure +// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// . If you use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does +// not validate the KMS key ID provided in PutBucketEncryption requests. This +// action requires Amazon Web Services Signature Version 4. For more information, +// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) +// . To use this operation, you must have permission to perform the +// s3:PutEncryptionConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. The following operations are related to +// PutBucketEncryption : +// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) { + if params == nil { + params = &PutBucketEncryptionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketEncryption", params, optFns, c.addOperationPutBucketEncryptionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketEncryptionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketEncryptionInput struct { + + // Specifies default encryption for a bucket using server-side encryption with + // different key options. By default, all buckets have a default encryption + // configuration that uses server-side encryption with Amazon S3 managed keys + // (SSE-S3). You can optionally configure default encryption for a bucket by using + // server-side encryption with an Amazon Web Services KMS key (SSE-KMS) or a + // customer-provided key (SSE-C). For information about the bucket default + // encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Specifies the default server-side-encryption configuration. + // + // This member is required. + ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . 
For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the server-side encryption + // configuration. For requests made using the Amazon Web Services Command Line + // Interface (CLI) or Amazon Web Services SDKs, this field is calculated + // automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *PutBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketEncryptionOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketEncryption{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketEncryption{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketEncryption"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketEncryptionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketEncryption(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketEncryptionInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketEncryptionUpdateEndpoint(stack, options); err != nil { + return 
err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutBucketEncryptionInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketEncryption", + } +} + +// getPutBucketEncryptionRequestAlgorithmMember gets the request checksum +// algorithm value provided as input. +func getPutBucketEncryptionRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketEncryptionInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketEncryptionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketEncryptionRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketEncryptionBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getPutBucketEncryptionBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketEncryptionInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketEncryptionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go new file mode 100644 index 000000000..61d73da56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go @@ -0,0 +1,229 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Puts an S3 +// Intelligent-Tiering configuration to the specified bucket. You can have up to +// 1,000 S3 Intelligent-Tiering configurations per bucket. The S3 +// Intelligent-Tiering storage class is designed to optimize storage costs by +// automatically moving data to the most cost-effective storage access tier, +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput access +// tiers. To get the lowest storage cost on data that can be accessed in minutes to +// hours, you can choose to activate additional archiving capabilities. The S3 +// Intelligent-Tiering storage class is the ideal storage class for data with +// unknown, changing, or unpredictable access patterns, independent of object size +// or retention period. If the size of an object is less than 128 KB, it is not +// monitored and not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. For more information, see Storage class for +// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) +// . Operations related to PutBucketIntelligentTieringConfiguration include: +// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// You only need S3 Intelligent-Tiering enabled on a bucket if you want to +// automatically move objects stored in the S3 Intelligent-Tiering storage class to +// the Archive Access or Deep Archive Access tier. +// PutBucketIntelligentTieringConfiguration has the following special errors: HTTP +// 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument HTTP 400 Bad +// Request Error Code: TooManyConfigurations Cause: You are attempting to create a +// new configuration but have already reached the 1,000-configuration limit. HTTP +// 403 Forbidden Error Cause: You are not the owner of the specified bucket, or you +// do not have the s3:PutIntelligentTieringConfiguration bucket permission to set +// the configuration on the bucket.
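Aside, not part of the vendored SDK file: a minimal caller-side sketch of the operation documented above. The bucket name, configuration ID, and 90-day threshold are illustrative assumptions, and field shapes follow this vendored SDK version as best understood.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Move objects not accessed for 90 days to the Archive Access tier.
	_, err = client.PutBucketIntelligentTieringConfiguration(ctx, &s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("archive-after-90-days"),
		IntelligentTieringConfiguration: &types.IntelligentTieringConfiguration{
			Id:     aws.String("archive-after-90-days"), // must match the Id above
			Status: types.IntelligentTieringStatusEnabled,
			Tierings: []types.Tiering{
				{AccessTier: types.IntelligentTieringAccessTierArchiveAccess, Days: aws.Int32(90)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}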
+func (c *Client) PutBucketIntelligentTieringConfiguration(ctx context.Context, params *PutBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*PutBucketIntelligentTieringConfigurationOutput, error) { + if params == nil { + params = &PutBucketIntelligentTieringConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketIntelligentTieringConfiguration", params, optFns, c.addOperationPutBucketIntelligentTieringConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketIntelligentTieringConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketIntelligentTieringConfigurationInput struct { + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // This member is required. + Bucket *string + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // This member is required. + Id *string + + // Container for S3 Intelligent-Tiering configuration. + // + // This member is required. + IntelligentTieringConfiguration *types.IntelligentTieringConfiguration + + noSmithyDocumentSerde +} + +func (in *PutBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketIntelligentTieringConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketIntelligentTieringConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *PutBucketIntelligentTieringConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketIntelligentTieringConfiguration", + } +} + +// getPutBucketIntelligentTieringConfigurationBucketMember returns a pointer to +// string denoting a provided bucket member value and a boolean indicating if the +// input has a modeled bucket name. +func getPutBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketIntelligentTieringConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketIntelligentTieringConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go new file mode 100644 index 000000000..03d79a0d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go @@ -0,0 +1,244 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. This implementation of +// the PUT action adds an inventory configuration (identified by the inventory ID) +// to the bucket.
You can have up to 1,000 inventory configurations per bucket. +// Amazon S3 inventory generates inventories of the objects in the bucket on a +// daily or weekly basis, and the results are published to a flat file. The bucket +// that is inventoried is called the source bucket, and the bucket where the +// inventory flat file is stored is called the destination bucket. The destination +// bucket must be in the same Amazon Web Services Region as the source bucket. When +// you configure an inventory for a source bucket, you specify the destination +// bucket where you want the inventory to be stored, and whether to generate the +// inventory daily or weekly. You can also configure what object metadata to +// include and whether to inventory all object versions or only current versions. +// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// in the Amazon S3 User Guide. You must create a bucket policy on the destination +// bucket to grant permissions to Amazon S3 to write objects to the bucket in the +// defined location. For an example policy, see Granting Permissions for Amazon S3 +// Inventory and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) +// . Permissions To use this operation, you must have permission to perform the +// s3:PutInventoryConfiguration action. The bucket owner has this permission by +// default and can grant this permission to others. The +// s3:PutInventoryConfiguration permission allows a user to create an S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html) +// report that includes all object metadata fields available and to specify the +// destination bucket to store the inventory. A user with read access to objects in +// the destination bucket can also access all object metadata fields that are +// available in the inventory report. To restrict access to an inventory report, +// see Restricting access to an Amazon S3 Inventory report (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10) +// in the Amazon S3 User Guide. For more information about the metadata fields +// available in S3 Inventory, see Amazon S3 Inventory lists (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents) +// in the Amazon S3 User Guide. For more information about permissions, see +// Permissions related to bucket subresource operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Identity and access management in Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. PutBucketInventoryConfiguration has the following +// special errors: HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid +// Argument HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. HTTP 403 Forbidden Error Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutInventoryConfiguration +// bucket permission to set the configuration on the bucket. 
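Aside, not part of the vendored SDK file: a sketch of enabling the weekly inventory described in the surrounding documentation. Bucket names, the configuration ID, and the destination ARN are assumptions; the client would be built with s3.NewFromConfig as in the earlier sketch.

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWeeklyInventory enables a weekly CSV inventory of current object versions,
// delivered to a destination bucket in the same Region.
func putWeeklyInventory(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketInventoryConfiguration(ctx, &s3.PutBucketInventoryConfigurationInput{
		Bucket: aws.String("source-bucket"),
		Id:     aws.String("weekly-inventory"),
		InventoryConfiguration: &types.InventoryConfiguration{
			Id:                     aws.String("weekly-inventory"), // must match the Id above
			IsEnabled:              aws.Bool(true),
			IncludedObjectVersions: types.InventoryIncludedObjectVersionsCurrent,
			Schedule:               &types.InventorySchedule{Frequency: types.InventoryFrequencyWeekly},
			Destination: &types.InventoryDestination{
				S3BucketDestination: &types.InventoryS3BucketDestination{
					// The destination bucket is addressed by ARN, per the API.
					Bucket: aws.String("arn:aws:s3:::destination-bucket"),
					Format: types.InventoryFormatCsv,
				},
			},
		},
	})
	return err
}

As the documentation notes, the destination bucket also needs a bucket policy that lets Amazon S3 write the inventory files.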
The following +// operations are related to PutBucketInventoryConfiguration : +// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) { + if params == nil { + params = &PutBucketInventoryConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketInventoryConfiguration", params, optFns, c.addOperationPutBucketInventoryConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketInventoryConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketInventoryConfigurationInput struct { + + // The name of the bucket where the inventory configuration will be stored. + // + // This member is required. + Bucket *string + + // The ID used to identify the inventory configuration. + // + // This member is required. + Id *string + + // Specifies the inventory configuration. + // + // This member is required. + InventoryConfiguration *types.InventoryConfiguration + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *PutBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketInventoryConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketInventoryConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketInventoryConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *PutBucketInventoryConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketInventoryConfiguration", + } +} + +// getPutBucketInventoryConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member value and a boolean indicating if the input has +// a modeled bucket name.
+func getPutBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketInventoryConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketInventoryConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go new file mode 100644 index 000000000..88096fdd1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go @@ -0,0 +1,281 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Creates a new lifecycle +// configuration for the bucket or replaces an existing lifecycle configuration. +// Keep in mind that this will overwrite an existing lifecycle configuration, so if +// you want to retain any configuration details, they must be included in the new +// lifecycle configuration. For information about lifecycle configuration, see +// Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) +// . Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, object size, or any +// combination of these. Accordingly, this section describes the latest API. The +// previous version of the API supported filtering based only on an object key name +// prefix, which is supported for backward compatibility. For the related API +// description, see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// . Rules You specify the lifecycle configuration in your request body. The +// lifecycle configuration is specified as XML consisting of one or more rules. An +// Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not +// adjustable. Each rule consists of the following: +// - A filter identifying a subset of objects to which the rule applies. The +// filter can be based on a key name prefix, object tags, object size, or any +// combination of these. +// - A status indicating whether the rule is in effect. +// - One or more lifecycle transition and expiration actions that you want +// Amazon S3 to perform on the objects identified by the filter. 
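Aside, not part of the vendored SDK file: a sketch of a single-rule lifecycle configuration for the PutBucketLifecycleConfiguration operation whose documentation begins above. The bucket name, rule ID, prefix, and 30-day expiration are assumptions; this vendored SDK version appears to model the rule filter as a tagged union (later releases flatten it to a struct).

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// expireLogsAfter30Days installs one rule that expires objects under the
// "logs/" prefix after 30 days, replacing any existing configuration.
func expireLogsAfter30Days(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketLifecycleConfiguration(ctx, &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"),
		LifecycleConfiguration: &types.BucketLifecycleConfiguration{
			Rules: []types.LifecycleRule{
				{
					ID:         aws.String("expire-logs"),
					Status:     types.ExpirationStatusEnabled,
					Filter:     &types.LifecycleRuleFilterMemberPrefix{Value: "logs/"},
					Expiration: &types.LifecycleExpiration{Days: aws.Int32(30)},
				},
			},
		},
	})
	return err
}

Because the call replaces the whole configuration, any rules worth keeping must be resent along with the new one, as the documentation above stresses.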
If the state of +// your bucket is versioning-enabled or versioning-suspended, you can have many +// versions of the same object (one current version and zero or more noncurrent +// versions). Amazon S3 provides predefined actions that you can specify for +// current and noncurrent object versions. +// +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html) +// . Permissions By default, all Amazon S3 resources are private, including +// buckets, objects, and related subresources (for example, lifecycle configuration +// and website configuration). Only the resource owner (that is, the Amazon Web +// Services account that created it) can access the resource. The resource owner +// can optionally grant access permissions to others by writing an access policy. +// For this operation, a user must get the s3:PutLifecycleConfiguration +// permission. You can also explicitly deny permissions. An explicit deny also +// supersedes any other permissions. If you want to block users or accounts from +// removing or deleting objects from your bucket, you must deny them permissions +// for the following actions: +// - s3:DeleteObject +// - s3:DeleteObjectVersion +// - s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to Your +// Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . The following operations are related to PutBucketLifecycleConfiguration : +// - Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) { + if params == nil { + params = &PutBucketLifecycleConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketLifecycleConfiguration", params, optFns, c.addOperationPutBucketLifecycleConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketLifecycleConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketLifecycleConfigurationInput struct { + + // The name of the bucket for which to set the configuration. + // + // This member is required. + Bucket *string + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Container for lifecycle rules. You can add as many as 1,000 rules. + LifecycleConfiguration *types.BucketLifecycleConfiguration + + noSmithyDocumentSerde +} + +func (in *PutBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketLifecycleConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLifecycleConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLifecycleConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketLifecycleConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + 
return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutBucketLifecycleConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketLifecycleConfiguration", + } +} + +// getPutBucketLifecycleConfigurationRequestAlgorithmMember gets the request +// checksum algorithm value provided as input. +func getPutBucketLifecycleConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketLifecycleConfigurationInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketLifecycleConfigurationRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketLifecycleConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member value and a boolean indicating if the input has +// a modeled bucket name. +func getPutBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketLifecycleConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketLifecycleConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go new file mode 100644 index 000000000..fb80d2ee1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go @@ -0,0 +1,272 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets.
Sets the logging +// parameters for a bucket and specifies permissions for who can view and modify +// the logging parameters. All logs are saved to buckets in the same Amazon Web +// Services Region as the source bucket. To set the logging status of a bucket, you +// must be the bucket owner. The bucket owner is automatically granted FULL_CONTROL +// to all logs. You use the Grantee request element to grant access to other +// people. The Permissions request element specifies the kind of access the +// grantee has to the logs. If the target bucket for log delivery uses the bucket +// owner enforced setting for S3 Object Ownership, you can't use the Grantee +// request element to grant access to others. Permissions can only be granted using +// policies. For more information, see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// in the Amazon S3 User Guide. Grantee Values You can specify the person (grantee) +// to whom you're assigning access rights (by using request elements) in the +// following ways: +// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and +// ignored in the request. +// - By Email address: <>Grantees@email.com<> The grantee is resolved to the +// CanonicalUser and, in a response to a GETObjectAcl request, appears as the +// CanonicalUser. +// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// To enable logging, you use LoggingEnabled and its children request elements. To +// disable logging, you use an empty BucketLoggingStatus request element: For +// more information about server access logging, see Server Access Logging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) +// in the Amazon S3 User Guide. For more information about creating a bucket, see +// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// . For more information about returning the logging status of a bucket, see +// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) +// . The following operations are related to PutBucketLogging : +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) +func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) { + if params == nil { + params = &PutBucketLoggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketLogging", params, optFns, c.addOperationPutBucketLoggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketLoggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketLoggingInput struct { + + // The name of the bucket for which to set the logging parameters. + // + // This member is required. + Bucket *string + + // Container for logging status information. + // + // This member is required. + BucketLoggingStatus *types.BucketLoggingStatus + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK.
This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash of the PutBucketLogging request body. For requests made using the + // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, + // this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *PutBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketLoggingOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLogging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLogging{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketLogging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketLoggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLogging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); 
err != nil { + return err + } + if err = addPutBucketLoggingInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketLoggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutBucketLoggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketLogging", + } +} + +// getPutBucketLoggingRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. +func getPutBucketLoggingRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketLoggingInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketLoggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketLoggingRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketLoggingBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getPutBucketLoggingBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketLoggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketLoggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go new file mode 100644 index 000000000..bff1452b9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go @@ -0,0 +1,226 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
+ +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Sets a metrics +// configuration (specified by the metrics configuration ID) for the bucket. You +// can have up to 1,000 metrics configurations per bucket. If you're updating an +// existing metrics configuration, note that this is a full replacement of the +// existing metrics configuration. If you don't include the elements you want to +// keep, they are erased. To use this operation, you must have permissions to +// perform the s3:PutMetricsConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about CloudWatch request metrics for Amazon S3, see +// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// . The following operations are related to PutBucketMetricsConfiguration : +// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// PutBucketMetricsConfiguration has the following special error: +// - Error code: TooManyConfigurations +// - Description: You are attempting to create a new configuration but have +// already reached the 1,000-configuration limit. +// - HTTP Status Code: HTTP 400 Bad Request +func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) { + if params == nil { + params = &PutBucketMetricsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketMetricsConfiguration", params, optFns, c.addOperationPutBucketMetricsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketMetricsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketMetricsConfigurationInput struct { + + // The name of the bucket for which the metrics configuration is set. + // + // This member is required. + Bucket *string + + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. + // + // This member is required. + Id *string + + // Specifies the metrics configuration. + // + // This member is required. + MetricsConfiguration *types.MetricsConfiguration + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *PutBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketMetricsConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketMetricsConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketMetricsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *PutBucketMetricsConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return 
*v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketMetricsConfiguration", + } +} + +// getPutBucketMetricsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member value and a boolean indicating if the input has +// a modeled bucket name. +func getPutBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketMetricsConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketMetricsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go new file mode 100644 index 000000000..e937b5c59 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go @@ -0,0 +1,237 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Enables notifications of +// specified events for a bucket. For more information about event notifications, +// see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// . Using this API, you can replace an existing notification configuration. The +// configuration is an XML file that defines the event types that you want Amazon +// S3 to publish and the destination where you want Amazon S3 to publish an event +// notification when it detects an event of the specified type. By default, your +// bucket has no event notifications configured. That is, the notification +// configuration will be an empty NotificationConfiguration . This action +// replaces the existing notification configuration with the configuration you +// include in the request body. After Amazon S3 receives this request, it first +// verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon +// Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner +// has permission to publish to it by sending a test notification. In the case of +// Lambda destinations, Amazon S3 verifies that the Lambda function permissions +// grant Amazon S3 permission to invoke the function from the Amazon S3 bucket.
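Aside, not part of the vendored SDK file: a sketch of the notification call described here, routing object-created events to an SQS queue. The bucket name and queue ARN (including the 123456789012 account ID) are assumptions; the queue's policy must allow Amazon S3 to send messages, or the test notification fails and the call is rejected.

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// notifyQueueOnCreate replaces the bucket's notification configuration with a
// single rule that publishes all object-created events to an SQS queue.
func notifyQueueOnCreate(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketNotificationConfiguration(ctx, &s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"),
		NotificationConfiguration: &types.NotificationConfiguration{
			QueueConfigurations: []types.QueueConfiguration{
				{
					QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"),
					Events:   []types.Event{types.EventS3ObjectCreated},
				},
			},
		},
	})
	return err
}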
For +// more information, see Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// . You can disable notifications by adding the empty NotificationConfiguration +// element. For more information about the number of event notification +// configurations that you can create per bucket, see Amazon S3 service quotas (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3) +// in Amazon Web Services General Reference. By default, only the bucket owner can +// configure notifications on a bucket. However, bucket owners can use a bucket +// policy to grant permission to other users to set this configuration with the +// required s3:PutBucketNotification permission. The PUT notification is an atomic +// operation. For example, suppose your notification configuration includes SNS +// topic, SQS queue, and Lambda function configurations. When you send a PUT +// request with this configuration, Amazon S3 sends test messages to your SNS +// topic. If the message fails, the entire PUT action will fail, and Amazon S3 will +// not add the configuration to your bucket. If the configuration in the request +// body includes only one TopicConfiguration specifying only the +// s3:ReducedRedundancyLostObject event type, the response will also include the +// x-amz-sns-test-message-id header containing the message ID of the test +// notification sent to the topic. The following action is related to +// PutBucketNotificationConfiguration : +// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) { + if params == nil { + params = &PutBucketNotificationConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketNotificationConfiguration", params, optFns, c.addOperationPutBucketNotificationConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketNotificationConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketNotificationConfigurationInput struct { + + // The name of the bucket. + // + // This member is required. + Bucket *string + + // A container for specifying the notification configuration of the bucket. If + // this element is empty, notifications are turned off for the bucket. + // + // This member is required. + NotificationConfiguration *types.NotificationConfiguration + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or + // false value. + SkipDestinationValidation *bool + + noSmithyDocumentSerde +} + +func (in *PutBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketNotificationConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketNotificationConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketNotificationConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketNotificationConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketNotificationConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *PutBucketNotificationConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketNotificationConfiguration", + } +} + +// getPutBucketNotificationConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member value and a boolean indicating if the input has +// a modeled bucket name. +func getPutBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketNotificationConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketNotificationConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go new file mode 100644 index 000000000..94875b755 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go @@ -0,0 +1,228 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Creates or modifies +// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have +// the s3:PutBucketOwnershipControls permission. For more information about Amazon +// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html) +// . For information about Amazon S3 Object Ownership, see Using object ownership (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html) +// . The following operations are related to PutBucketOwnershipControls : +// - GetBucketOwnershipControls +// - DeleteBucketOwnershipControls +func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) { + if params == nil { + params = &PutBucketOwnershipControlsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketOwnershipControls", params, optFns, c.addOperationPutBucketOwnershipControlsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketOwnershipControlsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketOwnershipControlsInput struct { + + // The name of the Amazon S3 bucket whose OwnershipControls you want to set. + // + // This member is required. + Bucket *string + + // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or + // ObjectWriter) that you want to apply to this Amazon S3 bucket. + // + // This member is required. + OwnershipControls *types.OwnershipControls + + // The MD5 hash of the OwnershipControls request body.
For requests made using the + // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, + // this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *PutBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketOwnershipControlsOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketOwnershipControls"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketOwnershipControlsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketOwnershipControls(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketOwnershipControlsInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = 
addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutBucketOwnershipControlsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketOwnershipControls", + } +} + +func addPutBucketOwnershipControlsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: nil, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketOwnershipControlsBucketMember returns a pointer to string denoting +// a provided bucket member value and a boolean indicating if the input has a +// modeled bucket name. +func getPutBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketOwnershipControlsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketOwnershipControlsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go new file mode 100644 index 000000000..88e3f2633 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go @@ -0,0 +1,309 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. Directory buckets - +// For directory buckets, you must make requests for this API operation to the +// Regional endpoint. These endpoints support path-style requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide.
Permissions If you are using an identity other than +// the root user of the Amazon Web Services account that owns the bucket, the +// calling identity must both have the PutBucketPolicy permissions on the +// specified bucket and belong to the bucket owner's account in order to use this +// operation. If you don't have PutBucketPolicy permissions, Amazon S3 returns a +// 403 Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. To ensure that bucket owners don't +// inadvertently lock themselves out of their own buckets, the root principal in a +// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , +// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket +// policy explicitly denies the root principal's access. Bucket owner root +// principals can only be blocked from performing these API actions by VPC endpoint +// policies and Amazon Web Services Organizations policies. +// - General purpose bucket permissions - The s3:PutBucketPolicy permission is +// required in a policy. For more information about general purpose buckets bucket +// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:PutBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web Services +// account that owns the resource. For more information about directory bucket +// policies and permissions, see Amazon Web Services Identity and Access +// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// Example bucket policies General purpose buckets example bucket policies - See +// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) +// in the Amazon S3 User Guide. Directory bucket example bucket policies - See +// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The +// HTTP Host header syntax is s3express-control.region.amazonaws.com . The +// following operations are related to PutBucketPolicy : +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) { + if params == nil { + params = &PutBucketPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketPolicy", params, optFns, c.addOperationPutBucketPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketPolicyInput struct { + + // The name of the bucket. 
Directory buckets - When you use this operation with a + // directory bucket, you must use path-style requests in the format + // https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // + // This member is required. + Bucket *string + + // The bucket policy as a JSON document. For directory buckets, the only IAM + // action supported in the bucket policy is s3express:CreateSession . + // + // This member is required. + Policy *string + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 + // fails the request with the HTTP status code 400 Bad Request . For the + // x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // - CRC32 + // - CRC32C + // - SHA1 + // - SHA256 + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If the individual checksum value you provide + // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set + // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided + // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the + // provided value in x-amz-checksum-algorithm . For directory buckets, when you + // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's + // used for performance. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. This functionality is not supported + // for directory buckets. + ConfirmRemoveSelfBucketAccess *bool + + // The MD5 hash of the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. This functionality is not supported for directory + // buckets. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). For directory buckets, this header + // is not supported in this API operation. If you specify this header, the request + // fails with the HTTP status code 501 Not Implemented . + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *PutBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketPolicyOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketPolicy{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketPolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketPolicyInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketPolicyUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutBucketPolicyInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketPolicy", + } +} + +// getPutBucketPolicyRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
+func getPutBucketPolicyRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketPolicyInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketPolicyInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketPolicyRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketPolicyBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getPutBucketPolicyBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketPolicyInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketPolicyBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go new file mode 100644 index 000000000..bf59164c0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go @@ -0,0 +1,286 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Creates a replication +// configuration or replaces an existing one. For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 User Guide. Specify the replication configuration in the +// request body. In the replication configuration, you provide the name of the +// destination bucket or buckets where you want Amazon S3 to replicate objects, the +// IAM role that Amazon S3 can assume to replicate objects on your behalf, and +// other relevant information. You can invoke this request for a specific Amazon +// Web Services Region by using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion) +// condition key. A replication configuration must include at least one rule, and +// can contain a maximum of 1,000. Each rule identifies a subset of objects to +// replicate by filtering the objects in the source bucket.
To choose additional +// subsets of objects to replicate, add a rule for each subset. To specify a subset +// of the objects in the source bucket to apply a replication rule to, add the +// Filter element as a child of the Rule element. You can filter objects based on +// an object key prefix, one or more object tags, or both. When you add the Filter +// element in the configuration, you must also add the following elements: +// DeleteMarkerReplication , Status , and Priority . If you are using an earlier +// version of the replication configuration, Amazon S3 handles replication of +// delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) +// . For information about enabling versioning on a bucket, see Using Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) +// . Handling Replication of Encrypted Objects By default, Amazon S3 doesn't +// replicate objects that are stored at rest using server-side encryption with KMS +// keys. To replicate Amazon Web Services KMS-encrypted objects, add the following: +// SourceSelectionCriteria , SseKmsEncryptedObjects , Status , +// EncryptionConfiguration , and ReplicaKmsKeyID . For information about +// replication configuration, see Replicating Objects Created with SSE Using KMS +// keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html) +// . For information on PutBucketReplication errors, see List of +// replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// Permissions To create a PutBucketReplication request, you must have +// s3:PutReplicationConfiguration permissions for the bucket. By default, a +// resource owner, in this case the Amazon Web Services account that created the +// bucket, can perform this operation. The resource owner can also grant others +// permissions to perform the operation. For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . To perform this operation, the user or role performing the action must have +// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. The following operations are related to PutBucketReplication : +// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) { + if params == nil { + params = &PutBucketReplicationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketReplication", params, optFns, c.addOperationPutBucketReplicationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketReplicationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketReplicationInput struct { + + // The name of the bucket + // + // This member is required. + Bucket *string + + // A container for replication rules. You can add up to 1,000 rules. 
The maximum + // size of a replication configuration is 2 MB. + // + // This member is required. + ReplicationConfiguration *types.ReplicationConfiguration + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // a message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) + // . For requests made using the Amazon Web Services Command Line Interface (CLI) + // or Amazon Web Services SDKs, this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // A token to allow Object Lock to be enabled for an existing bucket. + Token *string + + noSmithyDocumentSerde +} + +func (in *PutBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketReplicationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketReplication{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketReplication{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketReplication"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketReplicationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketReplication(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketReplicationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketReplicationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutBucketReplicationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketReplication", + } +} + +// getPutBucketReplicationRequestAlgorithmMember gets the request checksum +// algorithm value 
provided as input. +func getPutBucketReplicationRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketReplicationInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketReplicationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketReplicationRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketReplicationBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getPutBucketReplicationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketReplicationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketReplicationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go new file mode 100644 index 000000000..07e0f1639 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go @@ -0,0 +1,249 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Sets the request payment +// configuration for a bucket. By default, the bucket owner pays for downloads from +// the bucket. This configuration parameter enables the bucket owner (only) to +// specify that the person requesting the download will be charged for the +// download. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) +// .
The following operations are related to PutBucketRequestPayment : +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) +func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) { + if params == nil { + params = &PutBucketRequestPaymentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketRequestPayment", params, optFns, c.addOperationPutBucketRequestPaymentMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketRequestPaymentOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketRequestPaymentInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // Container for Payer. + // + // This member is required. + RequestPaymentConfiguration *types.RequestPaymentConfiguration + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // a message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) + // . For requests made using the Amazon Web Services Command Line Interface (CLI) + // or Amazon Web Services SDKs, this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *PutBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketRequestPaymentOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketRequestPayment{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketRequestPayment{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketRequestPayment"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketRequestPaymentValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketRequestPayment(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketRequestPaymentInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketRequestPaymentUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutBucketRequestPaymentInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketRequestPayment", + } +} + +// getPutBucketRequestPaymentRequestAlgorithmMember gets the request 
checksum +// algorithm value provided as input. +func getPutBucketRequestPaymentRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketRequestPaymentInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketRequestPaymentInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketRequestPaymentRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketRequestPaymentBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getPutBucketRequestPaymentBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketRequestPaymentInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketRequestPaymentBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go new file mode 100644 index 000000000..0f0a6fd40 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go @@ -0,0 +1,274 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Sets the tags for a +// bucket. Use tags to organize your Amazon Web Services bill to reflect your own +// cost structure. To do this, sign up to get your Amazon Web Services account bill +// with tag key values included. Then, to see the cost of combined resources, +// organize your billing information according to resources with the same tag key +// values. For example, you can tag several resources with a specific application +// name, and then organize your billing information to see the total cost of that +// application across several services. For more information, see Cost Allocation +// and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) +// .
When this operation sets the tags for a bucket, it will overwrite any current +// tags the bucket already has. You cannot use this operation to add tags to an +// existing list of tags. To use this operation, you must have permissions to +// perform the s3:PutBucketTagging action. The bucket owner has this permission by +// default and can grant this permission to others. For more information about +// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . PutBucketTagging has the following special errors. For more Amazon S3 errors +// see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) +// . +// - InvalidTag - The tag provided was not a valid tag. This error can occur if +// the tag did not pass input validation. For more information, see Using Cost +// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) +// . +// - MalformedXML - The XML provided does not match the schema. +// - OperationAborted - A conflicting conditional action is currently in progress +// against this resource. Please try again. +// - InternalError - The service was unable to apply the provided tag to the +// bucket. +// +// The following operations are related to PutBucketTagging : +// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) { + if params == nil { + params = &PutBucketTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketTagging", params, optFns, c.addOperationPutBucketTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketTaggingInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // Container for the TagSet and Tag elements. + // + // This member is required. + Tagging *types.Tagging + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // a message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) + // . 
For requests made using the Amazon Web Services Command Line Interface (CLI) + // or Amazon Web Services SDKs, this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *PutBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketTaggingOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketTagging{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketTagging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketTaggingInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = 
addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutBucketTaggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketTagging", + } +} + +// getPutBucketTaggingRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. +func getPutBucketTaggingRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketTaggingInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketTaggingRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketTaggingBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getPutBucketTaggingBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketTaggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketTaggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go new file mode 100644 index 000000000..495725cea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go @@ -0,0 +1,267 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Sets the versioning state +// of an existing bucket. You can set the versioning state with one of the +// following values: Enabled—Enables versioning for the objects in the bucket. All +// objects added to the bucket receive a unique version ID.
Suspended—Disables +// versioning for the objects in the bucket. All objects added to the bucket +// receive the version ID null. If the versioning state has never been set on a +// bucket, it has no versioning state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// request does not return a versioning state value. In order to enable MFA Delete, +// you must be the bucket owner. If you are the bucket owner and want to enable MFA +// Delete in the bucket versioning configuration, you must include the x-amz-mfa +// request header and the Status and the MfaDelete request elements in a request +// to set the versioning state of the bucket. If you have an object expiration +// lifecycle configuration in your non-versioned bucket and you want to maintain +// the same permanent delete behavior when you enable versioning, you must add a +// noncurrent expiration policy. The noncurrent expiration lifecycle configuration +// will manage the deletes of the noncurrent object versions in the version-enabled +// bucket. (A version-enabled bucket maintains one current and zero or more +// noncurrent object versions.) For more information, see Lifecycle and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config) +// . The following operations are related to PutBucketVersioning : +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// - GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) { + if params == nil { + params = &PutBucketVersioningInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketVersioning", params, optFns, c.addOperationPutBucketVersioningMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketVersioningOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketVersioningInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // Container for setting the versioning state. + // + // This member is required. + VersioningConfiguration *types.VersioningConfiguration + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // a message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) + // .
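For orientation, the operation above is typically driven from application code roughly as follows. This is a minimal caller-side sketch, assuming default credential resolution; the bucket name is a placeholder, not something introduced by this change.

```go
// Minimal sketch: enable versioning on an existing bucket.
// "example-bucket" is a placeholder.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)
	// Status mirrors the Enabled/Suspended states described in the doc comment.
	_, err = client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
		Bucket: aws.String("example-bucket"),
		VersioningConfiguration: &types.VersioningConfiguration{
			Status: types.BucketVersioningStatusEnabled,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```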
For requests made using the Amazon Web Services Command Line Interface (CLI) + // or Amazon Web Services SDKs, this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // The concatenation of the authentication device's serial number, a space, and + // the value that is displayed on your authentication device. + MFA *string + + noSmithyDocumentSerde +} + +func (in *PutBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketVersioningOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketVersioning{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketVersioning{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketVersioning"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketVersioningValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketVersioning(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketVersioningInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketVersioningUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = 
addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutBucketVersioningInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketVersioning", + } +} + +// getPutBucketVersioningRequestAlgorithmMember gets the request checksum +// algorithm value provided as input. +func getPutBucketVersioningRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketVersioningInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketVersioningInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketVersioningRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketVersioningBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getPutBucketVersioningBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketVersioningInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketVersioningBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go new file mode 100644 index 000000000..08c8a582f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go @@ -0,0 +1,284 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Sets the configuration of +// the website that is specified in the website subresource.
To configure a bucket +// as a website, you can add this subresource on the bucket with website +// configuration information such as the file name of the index document and any +// redirect rules. For more information, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) +// . This PUT action requires the S3:PutBucketWebsite permission. By default, only +// the bucket owner can configure the website attached to a bucket; however, bucket +// owners can allow other users to set the website configuration by writing a +// bucket policy that grants them the S3:PutBucketWebsite permission. To redirect +// all website requests sent to the bucket's website endpoint, you add a website +// configuration with the following elements. Because all requests are sent to +// another website, you don't need to provide index document name for the bucket. +// - WebsiteConfiguration +// - RedirectAllRequestsTo +// - HostName +// - Protocol +// +// If you want granular control over redirects, you can use the following elements +// to add routing rules that describe conditions for redirecting requests and +// information about the redirect destination. In this case, the website +// configuration must provide an index document for the bucket, because some +// requests might not be redirected. +// - WebsiteConfiguration +// - IndexDocument +// - Suffix +// - ErrorDocument +// - Key +// - RoutingRules +// - RoutingRule +// - Condition +// - HttpErrorCodeReturnedEquals +// - KeyPrefixEquals +// - Redirect +// - Protocol +// - HostName +// - ReplaceKeyPrefixWith +// - ReplaceKeyWith +// - HttpRedirectCode +// +// Amazon S3 has a limitation of 50 routing rules per website configuration. If +// you require more than 50 routing rules, you can use object redirect. For more +// information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) +// in the Amazon S3 User Guide. The maximum request length is limited to 128 KB. +func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) { + if params == nil { + params = &PutBucketWebsiteInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketWebsite", params, optFns, c.addOperationPutBucketWebsiteMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketWebsiteOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketWebsiteInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // Container for the request. + // + // This member is required. + WebsiteConfiguration *types.WebsiteConfiguration + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. 
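As a caller-side sketch of the operation documented above: the client is assumed to be already constructed, and the bucket and document keys are placeholders. Only PutBucketWebsite, WebsiteConfiguration, IndexDocument, and ErrorDocument come from this SDK surface.

```go
package example // illustrative helper, not part of the generated SDK

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableStaticWebsite applies a basic website configuration with an index
// and error document, matching the IndexDocument/ErrorDocument elements
// described in the doc comment above.
func enableStaticWebsite(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketWebsite(ctx, &s3.PutBucketWebsiteInput{
		Bucket: aws.String(bucket),
		WebsiteConfiguration: &types.WebsiteConfiguration{
			IndexDocument: &types.IndexDocument{Suffix: aws.String("index.html")},
			ErrorDocument: &types.ErrorDocument{Key: aws.String("error.html")},
		},
	})
	return err
}
```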
You must use this header as + // a message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) + // . For requests made using the Amazon Web Services Command Line Interface (CLI) + // or Amazon Web Services SDKs, this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *PutBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutBucketWebsiteOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketWebsite{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketWebsite{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketWebsite"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketWebsite(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutBucketWebsiteInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketWebsiteUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err 
= addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutBucketWebsiteInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutBucketWebsite", + } +} + +// getPutBucketWebsiteRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. +func getPutBucketWebsiteRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketWebsiteInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutBucketWebsiteInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketWebsiteRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutBucketWebsiteBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getPutBucketWebsiteBucketMember(input interface{}) (*string, bool) { + in := input.(*PutBucketWebsiteInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketWebsiteBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go new file mode 100644 index 000000000..d57e0026e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go @@ -0,0 +1,726 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "time" +) + +// Adds an object to a bucket. +// - Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket.
You cannot use PutObject to +// only update a single piece of metadata for an existing object. You must put the +// entire object with updated metadata if you want to update some values. +// - If your bucket uses the bucket owner enforced setting for Object Ownership, +// ACLs are disabled and no longer affect permissions. All objects written to the +// bucket by any account will be owned by the bucket owner. +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +// Path-style requests are not supported. For more information, see Regional and +// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Amazon S3 is a distributed system. If it receives multiple write requests for +// the same object simultaneously, it overwrites all but the last object written. +// However, Amazon S3 provides features that can modify this behavior: +// - S3 Object Lock - To prevent objects from being deleted or overwritten, you +// can use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) +// in the Amazon S3 User Guide. This functionality is not supported for directory +// buckets. +// - S3 Versioning - When you enable versioning for a bucket, if Amazon S3 +// receives multiple write requests for the same object simultaneously, it stores +// all versions of the objects. For each write request that is made to the same +// object, Amazon S3 automatically generates a unique version ID of that object +// being stored in Amazon S3. You can retrieve, replace, or delete any version of +// the object. For more information about versioning, see Adding Objects to +// Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) +// in the Amazon S3 User Guide. For information about returning the versioning +// state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// . This functionality is not supported for directory buckets. +// +// Permissions +// - General purpose bucket permissions - The following permissions are required +// in your policies when your PutObject request includes specific headers. +// - s3:PutObject - To successfully complete the PutObject request, you must +// always have the s3:PutObject permission on a bucket to add an object to it. +// - s3:PutObjectAcl - To successfully change the objects ACL of your PutObject +// request, you must have the s3:PutObjectAcl . +// - s3:PutObjectTagging - To successfully set the tag-set with your PutObject +// request, you must have the s3:PutObjectTagging . +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. 
After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Data integrity with Content-MD5 +// - General purpose bucket - To ensure that data is not corrupted traversing +// the network, use the Content-MD5 header. When you use this header, Amazon S3 +// checks the object against the provided MD5 value and, if they do not match, +// Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 +// digest, you can calculate the MD5 while putting the object to Amazon S3 and +// compare the returned ETag to the calculated MD5 value. +// - Directory bucket - This functionality is not supported for directory +// buckets. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . For more information about +// related Amazon S3 APIs, see the following: +// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) { + if params == nil { + params = &PutObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObject", params, optFns, c.addOperationPutObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectInput struct { + + // The bucket name to which the PUT action was initiated. Directory buckets - When + // you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. 
The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the PUT action was initiated. + // + // This member is required. + Key *string + + // The canned ACL to apply to the object. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. When adding a new object, you can use headers to + // grant ACL-based permissions to individual Amazon Web Services accounts or to + // predefined groups defined by Amazon S3. These permissions are then added to the + // ACL on the object. By default, all objects are private. Only the owner has full + // access control. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) + // in the Amazon S3 User Guide. If the bucket that you're uploading objects to uses + // the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and + // no longer affect permissions. Buckets that use this setting only accept PUT + // requests that don't specify an ACL or PUT requests that specify bucket owner + // full control ACLs, such as the bucket-owner-full-control canned ACL or an + // equivalent form of this ACL expressed in the XML format. PUT requests that + // contain other ACLs (for example, custom grants to certain Amazon Web Services + // accounts) fail and return a 400 error with the error code + // AccessControlListNotSupported . For more information, see Controlling + // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. + ACL types.ObjectCannedACL + + // Object data. + Body io.Reader + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object + // encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect + // bucket-level settings for S3 Bucket Key. This functionality is not supported for + // directory buckets. + BucketKeyEnabled *bool + + // Can be used to specify caching behavior along the request/reply chain. For more + // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) + // . + CacheControl *string + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. 
Otherwise, Amazon S3 + // fails the request with the HTTP status code 400 Bad Request . For the + // x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // - CRC32 + // - CRC32C + // - SHA1 + // - SHA256 + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If the individual checksum value you provide + // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set + // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided + // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the + // provided value in x-amz-checksum-algorithm . For directory buckets, when you + // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's + // used for performance. + ChecksumAlgorithm types.ChecksumAlgorithm + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Specifies presentational information for the object. For more information, see + // https://www.rfc-editor.org/rfc/rfc6266#section-4 (https://www.rfc-editor.org/rfc/rfc6266#section-4) + // . + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. For more information, see + // https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding (https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding) + // . + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // Size of the body in bytes. This parameter is useful when the size of the body + // cannot be determined automatically. 
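In practice the ChecksumAlgorithm field above is usually all a caller sets; the SDK's checksum middleware (wired up later in this file by addPutObjectInputChecksumMiddlewares) computes and sends the matching x-amz-checksum-* value. A minimal sketch, with placeholder names and an assumed client:

```go
package example // illustrative helper, not part of the generated SDK

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWithChecksum uploads body and asks the SDK to compute and send a
// CRC32 checksum for server-side integrity validation.
func putWithChecksum(ctx context.Context, client *s3.Client, bucket, key string, body io.Reader) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:            aws.String(bucket),
		Key:               aws.String(key),
		Body:              body,
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
	})
	return err
}
```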
For more information, see + // https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length) + // . + ContentLength *int64 + + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check to + // verify that the data is the same data that was originally sent. Although it is + // optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, see + // REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // . The Content-MD5 header is required for any request to upload an object with a + // retention period configured using Amazon S3 Object Lock. For more information + // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + ContentMD5 *string + + // A standard MIME type describing the format of the contents. For more + // information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type) + // . + ContentType *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // The date and time at which the object is no longer cacheable. For more + // information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3 (https://www.rfc-editor.org/rfc/rfc7234#section-5.3) + // . + Expires *time.Time + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. + GrantFullControl *string + + // Allows grantee to read the object data and its metadata. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. + GrantRead *string + + // Allows grantee to read the object ACL. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. + GrantReadACP *string + + // Allows grantee to write the ACL for the applicable object. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. + GrantWriteACP *string + + // A map of metadata to store with the object in S3. + Metadata map[string]string + + // Specifies whether a legal hold will be applied to this object. For more + // information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus + + // The Object Lock mode that you want to apply to this object. This functionality + // is not supported for directory buckets. + ObjectLockMode types.ObjectLockMode + + // The date and time when you want this object's Object Lock to expire. Must be + // formatted as a timestamp parameter. 
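Tying the Object Lock fields together: per the ContentMD5 documentation above, uploads that set a retention period must carry an integrity header, which can also be satisfied by requesting an SDK-computed checksum. A sketch under those assumptions (all names are placeholders):

```go
package example // illustrative helper, not part of the generated SDK

import (
	"context"
	"io"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWithRetention stores an object under a governance-mode retention
// window; ChecksumAlgorithm supplies the required integrity header.
func putWithRetention(ctx context.Context, client *s3.Client, bucket, key string, body io.Reader, until time.Time) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:                    aws.String(bucket),
		Key:                       aws.String(key),
		Body:                      body,
		ObjectLockMode:            types.ObjectLockModeGovernance,
		ObjectLockRetainUntilDate: aws.Time(until),
		ChecksumAlgorithm:         types.ChecksumAlgorithmSha256,
	})
	return err
}
```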
This functionality is not supported for + // directory buckets. + ObjectLockRetainUntilDate *time.Time + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use when encrypting the object (for example, AES256 + // ). This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded; Amazon S3 does not store the encryption key. The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported for directory buckets. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. This functionality is not + // supported for directory buckets. + SSECustomerKeyMD5 *string + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. This value is stored as object + // metadata and automatically gets passed on to Amazon Web Services KMS for future + // GetObject or CopyObject operations on this object. This value must be + // explicitly added during CopyObject operations. This functionality is not + // supported for directory buckets. + SSEKMSEncryptionContext *string + + // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , + // this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key + // Management Service (KMS) symmetric encryption customer managed key that was used + // for the object. If you specify x-amz-server-side-encryption:aws:kms or + // x-amz-server-side-encryption:aws:kms:dsse , but do not provide + // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web + // Services managed key ( aws/s3 ) to protect the data. If the KMS key does not + // exist in the same account that's issuing the command, you must use the full ARN + // and not just the ID. This functionality is not supported for directory buckets. + SSEKMSKeyId *string + + // The server-side encryption algorithm that was used when you store this object + // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). General purpose + // buckets - You have four mutually exclusive options to protect data using + // server-side encryption in Amazon S3, depending on how you choose to manage the + // encryption keys. Specifically, the encryption key options are Amazon S3 managed + // keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and + // customer-provided keys (SSE-C). 
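A sketch of the SSE-KMS path described above. The key ID is a placeholder; omitting SSEKMSKeyId falls back to the Amazon Web Services managed key (aws/s3), as documented:

```go
package example // illustrative helper, not part of the generated SDK

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWithSSEKMS uploads body encrypted server-side with a customer
// managed KMS key.
func putWithSSEKMS(ctx context.Context, client *s3.Client, bucket, key, kmsKeyID string, body io.Reader) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:               aws.String(bucket),
		Key:                  aws.String(key),
		Body:                 body,
		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
		SSEKMSKeyId:          aws.String(kmsKeyID),
	})
	return err
}
```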
Amazon S3 encrypts data with server-side + // encryption by using Amazon S3 managed keys (SSE-S3) by default. You can + // optionally tell Amazon S3 to encrypt data at rest by using server-side + // encryption with other key options. For more information, see Using Server-Side + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + // in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) value is + // supported. + ServerSideEncryption types.ServerSideEncryption + + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high + // availability. Depending on performance needs, you can specify a different + // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. + // - For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + StorageClass types.StorageClass + + // The tag-set for the object. The tag-set must be encoded as URL Query + // parameters. (For example, "Key1=Value1") This functionality is not supported for + // directory buckets. + Tagging *string + + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. Amazon S3 stores the + // value of this header in the object metadata. For information about object + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) + // in the Amazon S3 User Guide. In the following example, the request header sets + // the redirect to an object (anotherPage.html) in the same bucket: + // x-amz-website-redirect-location: /anotherPage.html In the following example, the + // request header sets the object redirect to another website: + // x-amz-website-redirect-location: http://www.example.com/ For more information + // about website hosting in Amazon S3, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + WebsiteRedirectLocation *string + + noSmithyDocumentSerde +} + +func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + +type PutObjectOutput struct { + + // Indicates whether the uploaded object uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. + BucketKeyEnabled *bool + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. 
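The input surface above and the output fields documented below can be exercised end to end. A plain sketch that uploads a small body and logs the returned ETag and VersionId (the latter is only set on versioning-enabled buckets); names are placeholders:

```go
package example // illustrative helper, not part of the generated SDK

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// putAndInspect performs a basic upload and logs the returned ETag and,
// when the bucket is versioned, the generated VersionId.
func putAndInspect(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(key),
		Body:        strings.NewReader("hello"),
		ContentType: aws.String("text/plain"),
	})
	if err != nil {
		return err
	}
	log.Printf("etag=%s version=%s", aws.ToString(out.ETag), aws.ToString(out.VersionId))
	return nil
}
```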
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag for the uploaded object. General purpose buckets - To ensure that + // data is not corrupted traversing the network, for objects where the ETag is the + // MD5 digest of the object, you can calculate the MD5 while putting an object to + // Amazon S3 and compare the returned ETag to the calculated MD5 value. Directory + // buckets - The ETag for the object in a directory bucket isn't the MD5 digest of + // the object. + ETag *string + + // If the expiration is configured for the object (see + // PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) + // ) in the Amazon S3 User Guide, the response includes this header. It includes + // the expiry-date and rule-id key-value pairs that provide information about + // object expiration. The value of the rule-id is URL-encoded. This functionality + // is not supported for directory buckets. + Expiration *string + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. 
+ RequestCharged types.RequestCharged + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string + + // If present, indicates the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a base64-encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. This value is stored + // as object metadata and automatically gets passed on to Amazon Web Services KMS + // for future GetObject or CopyObject operations on this object. This + // functionality is not supported for directory buckets. + SSEKMSEncryptionContext *string + + // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , + // this header indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is + // supported. + ServerSideEncryption types.ServerSideEncryption + + // Version ID of the object. If you enable versioning for a bucket, Amazon S3 + // automatically generates a unique version ID for the object being stored. Amazon + // S3 returns this ID in the response. When you enable versioning for a bucket, if + // Amazon S3 receives multiple write requests for the same object simultaneously, + // it stores all of the objects. For more information about versioning, see Adding + // Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) + // in the Amazon S3 User Guide. For information about returning the versioning + // state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) + // . This functionality is not supported for directory buckets. + VersionId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObject{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutObject"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = add100Continue(stack, options); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutObjectInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func (v *PutObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutObject", + } +} + +// getPutObjectRequestAlgorithmMember gets the request checksum algorithm value +// provided as input. 
+func getPutObjectRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectRequestAlgorithmMember, + RequireChecksum: false, + EnableTrailingChecksum: true, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectBucketMember returns a pointer to string denoting a provided bucket +// member value and a boolean indicating if the input has a modeled bucket name. +func getPutObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignPutObject is used to generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignPutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &PutObjectInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "PutObject", params, clientOptFns, + c.client.addOperationPutObjectMiddlewares, + presignConverter(options).convertToPresignMiddleware, + func(stack *middleware.Stack, options Options) error { + return awshttp.RemoveContentTypeHeader(stack) + }, + addPutObjectPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addPutObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go new file mode 100644 index 000000000..08fea12c1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go @@ -0,0 +1,385 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
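PresignPutObject above is typically consumed like this: the returned URL is handed to any HTTP client, which performs the raw PUT with an unsigned payload (matching addPutObjectPayloadAsUnsigned). A sketch with a placeholder expiry and names:

```go
package example // illustrative helper, not part of the generated SDK

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// presignUpload returns a URL that permits an HTTP PUT of the object for
// the next 15 minutes.
func presignUpload(ctx context.Context, client *s3.Client, bucket, key string) (string, error) {
	presigner := s3.NewPresignClient(client)
	req, err := presigner.PresignPutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}, s3.WithPresignExpires(15*time.Minute))
	if err != nil {
		return "", err
	}
	return req.URL, nil
}
```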
+ +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Uses the acl subresource +// to set the access control list (ACL) permissions for a new or existing object in +// an S3 bucket. You must have the WRITE_ACP permission to set the ACL of an +// object. For more information, see What permissions can I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) +// in the Amazon S3 User Guide. This functionality is not supported for Amazon S3 +// on Outposts. Depending on your application needs, you can choose to set the ACL +// on an object using either the request body or the headers. For example, if you +// have an existing application that updates a bucket ACL using the request body, +// you can continue to use that approach. For more information, see Access Control +// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced +// setting for S3 Object Ownership, ACLs are disabled and no longer affect +// permissions. You must use policies to grant access to your bucket and the +// objects in it. Requests to set ACLs or update ACLs fail and return the +// AccessControlListNotSupported error code. Requests to read ACLs are still +// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. Permissions You can set access permissions using +// one of the following methods: +// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a +// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined +// set of grantees and permissions. Specify the canned ACL name as the value of +// x-amz-acl . If you use this header, you cannot use other access +// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) +// . +// - Specify access permissions explicitly with the x-amz-grant-read , +// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control +// headers. When using these headers, you specify explicit access permissions and +// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the +// permission. If you use these ACL-specific headers, you cannot use the x-amz-acl +// header to set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. For more information, see Access Control List +// (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// .
You specify each grantee as a type=value pair, where the type is one of the +// following: +// - id – if the value specified is the canonical user ID of an Amazon Web +// Services account +// - uri – if you are granting permissions to a predefined group +// - emailAddress – if the value specified is the email address of an Amazon Web +// Services account Using email addresses to specify a grantee is only supported in +// the following Amazon Web Services Regions: +// - US East (N. Virginia) +// - US West (N. California) +// - US West (Oregon) +// - Asia Pacific (Singapore) +// - Asia Pacific (Sydney) +// - Asia Pacific (Tokyo) +// - Europe (Ireland) +// - South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the Amazon Web Services General Reference. For example, the following +// x-amz-grant-read header grants list objects permission to the two Amazon Web +// Services accounts identified by their email addresses. x-amz-grant-read: +// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// +// You can use either a canned ACL or specify access permissions explicitly. You +// cannot do both. Grantee Values You can specify the person (grantee) to whom +// you're assigning access rights (using request elements) in the following ways: +// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and +// ignored in the request. +// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// - By Email address: <>Grantees@email.com<> The grantee is resolved +// to the CanonicalUser and, in a response to a GET Object acl request, appears as +// the CanonicalUser. Using email addresses to specify a grantee is only supported +// in the following Amazon Web Services Regions: +// - US East (N. Virginia) +// - US West (N. California) +// - US West (Oregon) +// - Asia Pacific (Singapore) +// - Asia Pacific (Sydney) +// - Asia Pacific (Tokyo) +// - Europe (Ireland) +// - South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the Amazon Web Services General Reference. +// +// Versioning The ACL of an object is set at the object version level. By default, +// PUT sets the ACL of the current version of an object. To set the ACL of a +// different version, use the versionId subresource. The following operations are +// related to PutObjectAcl : +// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) { + if params == nil { + params = &PutObjectAclInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectAcl", params, optFns, c.addOperationPutObjectAclMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectAclOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectAclInput struct { + + // The bucket name that contains the object to which you want to attach the ACL. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN.
When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + // The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Key for which the PUT action was initiated. + // + // This member is required. + Key *string + + // The canned ACL to apply to the object. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // . + ACL types.ObjectCannedACL + + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *types.AccessControlPolicy + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. This header must be used as + // a message integrity check to verify that the request body was not corrupted in + // transit. For more information, go to RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) + // . For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. This functionality is not supported for Amazon S3 on Outposts. + GrantFullControl *string + + // Allows grantee to list the objects in the bucket. This functionality is not + // supported for Amazon S3 on Outposts. + GrantRead *string + + // Allows grantee to read the bucket ACL. This functionality is not supported for + // Amazon S3 on Outposts.
+ GrantReadACP *string + + // Allows grantee to create new objects in the bucket. For the bucket and object + // owners of existing objects, also allows deletions and overwrites of those + // objects. + GrantWrite *string + + // Allows grantee to write the ACL for the applicable bucket. This functionality + // is not supported for Amazon S3 on Outposts. + GrantWriteACP *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Version ID used to reference a specific version of the object. This + // functionality is not supported for directory buckets. + VersionId *string + + noSmithyDocumentSerde +} + +func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + +type PutObjectAclOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectAcl{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectAcl{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectAcl"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutObjectAclValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectAcl(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutObjectAclInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectAclUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutObjectAclInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutObjectAcl", + } +} + +// getPutObjectAclRequestAlgorithmMember gets the request checksum algorithm value +// provided as input. +func getPutObjectAclRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectAclInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectAclRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectAclBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getPutObjectAclBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectAclInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectAclBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go new file mode 100644 index 000000000..cc23509f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go @@ -0,0 +1,272 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
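The PutObjectAcl operation vendored above accepts either a canned ACL or the explicit x-amz-grant-* headers, never both. A minimal sketch of the canned-ACL path — bucket, key, and client bootstrap are assumptions for illustration, not values from this diff:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)
	// A canned ACL travels as the x-amz-acl header and is mutually exclusive
	// with the explicit grant headers described in the doc comment above.
	_, err = client.PutObjectAcl(context.TODO(), &s3.PutObjectAclInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		Key:    aws.String("example-key"),    // hypothetical key
		ACL:    types.ObjectCannedACLPublicRead,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

Note that RequireChecksum is true for this operation's input middleware, so the SDK computes the request-body checksum itself when the caller does not supply one.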
+ +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Applies a legal hold +// configuration to the specified object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . This functionality is not supported for Amazon S3 on Outposts. +func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) { + if params == nil { + params = &PutObjectLegalHoldInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectLegalHold", params, optFns, c.addOperationPutObjectLegalHoldMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectLegalHoldOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectLegalHoldInput struct { + + // The bucket name containing the object that you want to place a legal hold on. + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key name for the object that you want to place a legal hold on. + // + // This member is required. + Key *string + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
+ ExpectedBucketOwner *string + + // Container element for the legal hold configuration you want to apply to the + // specified object. + LegalHold *types.ObjectLockLegalHold + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // The version ID of the object that you want to place a legal hold on. + VersionId *string + + noSmithyDocumentSerde +} + +func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type PutObjectLegalHoldOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLegalHold{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLegalHold{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectLegalHold"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutObjectLegalHoldValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLegalHold(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != 
nil { + return err + } + if err = addPutObjectLegalHoldInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectLegalHoldUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutObjectLegalHoldInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutObjectLegalHold", + } +} + +// getPutObjectLegalHoldRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. +func getPutObjectLegalHoldRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectLegalHoldInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectLegalHoldInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectLegalHoldRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectLegalHoldBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getPutObjectLegalHoldBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectLegalHoldInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectLegalHoldBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go new file mode 100644 index 000000000..358ececc6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go @@ -0,0 +1,265 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
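PutObjectLegalHold, vendored above, carries a single ObjectLockLegalHold element whose Status field toggles the hold on or off. A minimal sketch of placing a hold — bucket and key are hypothetical, and the bucket is assumed to already have Object Lock enabled:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)
	// Place the hold; re-sending with ObjectLockLegalHoldStatusOff releases
	// it. VersionId (omitted here) can pin the hold to a specific version.
	_, err = client.PutObjectLegalHold(context.TODO(), &s3.PutObjectLegalHoldInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		Key:    aws.String("example-key"),    // hypothetical key
		LegalHold: &types.ObjectLockLegalHold{
			Status: types.ObjectLockLegalHoldStatusOn,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```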
+ +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Places an Object Lock +// configuration on the specified bucket. The rule specified in the Object Lock +// configuration will be applied by default to every new object placed in the +// specified bucket. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . +// - The DefaultRetention settings require both a mode and a period. +// - The DefaultRetention period can be either Days or Years but you must select +// one. You cannot specify Days and Years at the same time. +// - You can enable Object Lock for new or existing buckets. For more +// information, see Configuring Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html) +// . +func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) { + if params == nil { + params = &PutObjectLockConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectLockConfiguration", params, optFns, c.addOperationPutObjectLockConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectLockConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectLockConfigurationInput struct { + + // The bucket whose Object Lock configuration you want to create or replace. + // + // This member is required. + Bucket *string + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // The Object Lock configuration that you want to apply to the specified bucket. + ObjectLockConfiguration *types.ObjectLockConfiguration + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // A token to allow Object Lock to be enabled for an existing bucket. + Token *string + + noSmithyDocumentSerde +} + +func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type PutObjectLockConfigurationOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLockConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLockConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectLockConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutObjectLockConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLockConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutObjectLockConfigurationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectLockConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = 
addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutObjectLockConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutObjectLockConfiguration", + } +} + +// getPutObjectLockConfigurationRequestAlgorithmMember gets the request checksum +// algorithm value provided as input. +func getPutObjectLockConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectLockConfigurationInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectLockConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectLockConfigurationRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectLockConfigurationBucketMember returns a pointer to string denoting +// a provided bucket member value and a boolean indicating if the input has a +// modeled bucket name. +func getPutObjectLockConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectLockConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectLockConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go new file mode 100644 index 000000000..eb787de48 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go @@ -0,0 +1,279 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
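PutObjectLockConfiguration, above, sets the bucket-wide default retention rule, while the PutObjectRetention operation vendored next overrides it per object. A sketch of both calls, respecting the DefaultRetention constraint noted in the documentation (a mode plus exactly one of Days or Years); every name and duration here is a placeholder assumption:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)
	ctx := context.TODO()

	// Bucket-wide default: every new object gets 30 days of GOVERNANCE
	// retention. Days and Years are mutually exclusive.
	_, err = client.PutObjectLockConfiguration(ctx, &s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		ObjectLockConfiguration: &types.ObjectLockConfiguration{
			ObjectLockEnabled: types.ObjectLockEnabledEnabled,
			Rule: &types.ObjectLockRule{
				DefaultRetention: &types.DefaultRetention{
					Mode: types.ObjectLockRetentionModeGovernance,
					Days: aws.Int32(30),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Per-object override with an explicit retain-until date.
	_, err = client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		Key:    aws.String("example-key"),    // hypothetical key
		Retention: &types.ObjectLockRetention{
			Mode:            types.ObjectLockRetentionModeGovernance,
			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 60)),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```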
+ +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Places an Object +// Retention configuration on an object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . Users or accounts require the s3:PutObjectRetention permission in order to +// place an Object Retention configuration on objects. Bypassing a Governance +// Retention configuration requires the s3:BypassGovernanceRetention permission. +// This functionality is not supported for Amazon S3 on Outposts. +func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) { + if params == nil { + params = &PutObjectRetentionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectRetention", params, optFns, c.addOperationPutObjectRetentionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectRetentionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectRetentionInput struct { + + // The bucket name that contains the object you want to apply this Object + // Retention configuration to. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key name for the object that you want to apply this Object Retention + // configuration to. + // + // This member is required. + Key *string + + // Indicates whether this action should bypass Governance-mode restrictions. + BypassGovernanceRetention *bool + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash for the request body. 
For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // The container element for the Object Retention configuration. + Retention *types.ObjectLockRetention + + // The version ID for the object that you want to apply this Object Retention + // configuration to. + VersionId *string + + noSmithyDocumentSerde +} + +func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type PutObjectRetentionOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectRetention{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectRetention{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectRetention"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil 
{ + return err + } + if err = addOpPutObjectRetentionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectRetention(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutObjectRetentionInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectRetentionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutObjectRetentionInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutObjectRetention", + } +} + +// getPutObjectRetentionRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. +func getPutObjectRetentionRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectRetentionInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectRetentionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectRetentionRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectRetentionBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getPutObjectRetentionBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectRetentionInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectRetentionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go new file mode
100644 index 000000000..2768db502 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go @@ -0,0 +1,305 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation is not supported by directory buckets. Sets the supplied tag-set +// to an object that already exists in a bucket. A tag is a key-value pair. For +// more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) +// . You can associate tags with an object by sending a PUT request against the +// tagging subresource that is associated with the object. You can retrieve tags by +// sending a GET request. For more information, see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// . For tagging-related restrictions related to characters and encodings, see Tag +// Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// . Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// To use this operation, you must have permission to perform the +// s3:PutObjectTagging action. By default, the bucket owner has this permission and +// can grant this permission to others. To put tags of any other version, use the +// versionId query parameter. You also need permission for the +// s3:PutObjectVersionTagging action. PutObjectTagging has the following special +// errors. For more Amazon S3 errors see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) +// . +// - InvalidTag - The tag provided was not a valid tag. This error can occur if +// the tag did not pass input validation. For more information, see Object +// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) +// . +// - MalformedXML - The XML provided does not match the schema. +// - OperationAborted - A conflicting conditional action is currently in progress +// against this resource. Please try again. +// - InternalError - The service was unable to apply the provided tag to the +// object. +// +// The following operations are related to PutObjectTagging : +// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) { + if params == nil { + params = &PutObjectTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectTagging", params, optFns, c.addOperationPutObjectTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectTaggingInput struct { + + // The bucket name containing the object. 
Access points - When you use this action + // with an access point, you must provide the alias of the access point in place of + // the bucket name or specify the access point ARN. When using the access point + // ARN, you must direct requests to the access point hostname. The access point + // hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + // The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Name of the object key. + // + // This member is required. + Key *string + + // Container for the TagSet and Tag elements + // + // This member is required. + Tagging *types.Tagging + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // The versionId of the object that the tag-set will be added to. 
+ VersionId *string + + noSmithyDocumentSerde +} + +func (in *PutObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type PutObjectTaggingOutput struct { + + // The versionId of the object the tag-set was added to. + VersionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectTagging{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectTagging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutObjectTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutObjectTaggingInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutObjectTaggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opPutObjectTagging(region 
string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutObjectTagging",
+ }
+}
+
+// getPutObjectTaggingRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getPutObjectTaggingRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutObjectTaggingInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutObjectTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutObjectTaggingRequestAlgorithmMember,
+ RequireChecksum: true,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutObjectTaggingBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutObjectTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go
new file mode 100644
index 000000000..7e6d0788e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go
@@ -0,0 +1,260 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported by directory buckets. Creates or modifies the
+// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation,
+// you must have the s3:PutBucketPublicAccessBlock permission. For more
+// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
+// . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or
+// an object, it checks the PublicAccessBlock configuration for both the bucket
+// (or the bucket that contains the object) and the bucket owner's account.
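+//
+// A hedged usage sketch (not part of the generated file; the bucket name, cfg,
+// and ctx are assumptions): blocking all public access on a bucket with this
+// operation, using the aws, s3, and types packages of this SDK snapshot, in
+// which the configuration fields are *bool:
+//
+//	client := s3.NewFromConfig(cfg) // cfg from config.LoadDefaultConfig
+//	_, err := client.PutPublicAccessBlock(ctx, &s3.PutPublicAccessBlockInput{
+//		Bucket: aws.String("example-bucket"), // assumed bucket name
+//		PublicAccessBlockConfiguration: &types.PublicAccessBlockConfiguration{
+//			BlockPublicAcls:       aws.Bool(true),
+//			BlockPublicPolicy:     aws.Bool(true),
+//			IgnorePublicAcls:      aws.Bool(true),
+//			RestrictPublicBuckets: aws.Bool(true),
+//		},
+//	})
+//	if err != nil {
+//		// handle the request failure
+//	}
+//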
If the +// PublicAccessBlock configurations are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level and +// account-level settings. For more information about when Amazon S3 considers a +// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// . The following operations are related to PutPublicAccessBlock : +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) { + if params == nil { + params = &PutPublicAccessBlockInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutPublicAccessBlock", params, optFns, c.addOperationPutPublicAccessBlockMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutPublicAccessBlockOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutPublicAccessBlockInput struct { + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want + // to set. + // + // This member is required. + Bucket *string + + // The PublicAccessBlock configuration that you want to apply to this Amazon S3 + // bucket. You can enable the configuration options in any combination. For more + // information about when Amazon S3 considers a bucket or object public, see The + // Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon S3 User Guide. + // + // This member is required. + PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash of the PutPublicAccessBlock request body. For requests made using + // the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services + // SDKs, this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
+ ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *PutPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type PutPublicAccessBlockOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpPutPublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutPublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutPublicAccessBlock"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpPutPublicAccessBlockValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutPublicAccessBlock(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addPutPublicAccessBlockInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutPublicAccessBlockUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + return nil +} + +func (v *PutPublicAccessBlockInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func 
newServiceMetadataMiddleware_opPutPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutPublicAccessBlock",
+ }
+}
+
+// getPutPublicAccessBlockRequestAlgorithmMember gets the request checksum
+// algorithm value provided as input.
+func getPutPublicAccessBlockRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutPublicAccessBlockInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutPublicAccessBlockInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutPublicAccessBlockRequestAlgorithmMember,
+ RequireChecksum: true,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutPublicAccessBlockBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getPutPublicAccessBlockBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutPublicAccessBlockInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutPublicAccessBlockBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go
new file mode 100644
index 000000000..3b6aad85b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go
@@ -0,0 +1,387 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported by directory buckets. Restores an archived copy
+// of an object back into Amazon S3. This functionality is not supported for Amazon
+// S3 on Outposts.
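+//
+// A hedged usage sketch (not part of the generated file; names are assumptions):
+// initiating a 5-day Bulk restore of an archived object, where client is an
+// *s3.Client, ctx a context.Context, and aws/types are this SDK's packages:
+//
+//	_, err := client.RestoreObject(ctx, &s3.RestoreObjectInput{
+//		Bucket: aws.String("example-bucket"),   // assumed bucket
+//		Key:    aws.String("archive/data.bin"), // assumed key
+//		RestoreRequest: &types.RestoreRequest{
+//			Days: aws.Int32(5),
+//			GlacierJobParameters: &types.GlacierJobParameters{Tier: types.TierBulk},
+//		},
+//	})
+//	if err != nil {
+//		// handle the request failure
+//	}
+//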
This action performs the following types of requests:
+// - restore an archive - Restore an archived object
+//
+// For more information about the S3 structure in the request body, see the
+// following:
+// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+// - Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
+// in the Amazon S3 User Guide
+// - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
+// in the Amazon S3 User Guide
+//
+// Permissions To use this operation, you must have permissions to perform the
+// s3:RestoreObject action. The bucket owner has this permission by default and can
+// grant this permission to others. For more information about permissions, see
+// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide. Restoring objects Objects that you archive to the
+// S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive
+// storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep
+// Archive tiers, are not accessible in real time. For objects in the S3 Glacier
+// Flexible Retrieval or S3 Glacier Deep Archive storage
+// classes, you must first initiate a restore request, and then wait until a
+// temporary copy of the object is available. If you want a permanent copy of the
+// object, create a copy of it in the Amazon S3 Standard storage class in your S3
+// bucket. To access an archived object, you must restore the object for the
+// duration (number of days) that you specify. For objects in the Archive Access or
+// Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a
+// restore request, and then wait until the object is moved into the Frequent
+// Access tier. To restore a specific object version, you can provide a version ID.
+// If you don't provide a version ID, Amazon S3 restores the current version. When
+// restoring an archived object, you can specify one of the following data access
+// tier options in the Tier element of the request body:
+// - Expedited - Expedited retrievals allow you to quickly access your data
+// stored in the S3 Glacier Flexible Retrieval storage class or
+// S3 Intelligent-Tiering Archive tier when occasional urgent requests for
+// restoring archives are required. For all but the largest archived objects (250
+// MB+), data accessed using Expedited retrievals is typically made available
+// within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for
+// Expedited retrievals is available when you need it. Expedited retrievals and
+// provisioned capacity are not available for objects stored in the S3 Glacier Deep
+// Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
+// - Standard - Standard retrievals allow you to access any of your archived
+// objects within several hours. This is the default option for retrieval requests
+// that do not specify the retrieval option. Standard retrievals typically finish
+// within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval
+// storage class or S3 Intelligent-Tiering Archive tier. They
+// typically finish within 12 hours for objects stored in the S3 Glacier Deep
+// Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard
+// retrievals are free for objects stored in S3 Intelligent-Tiering.
+// - Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible
+// Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve
+// large amounts, even petabytes, of data at no cost. Bulk retrievals typically
+// finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval
+// storage class or S3 Intelligent-Tiering Archive tier. Bulk
+// retrievals are also the lowest-cost retrieval option when restoring objects from
+// S3 Glacier Deep Archive. They typically finish within 48 hours for objects
+// stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering
+// Deep Archive tier.
+//
+// For more information about archive retrieval options and provisioned capacity
+// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
+// in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to
+// change the restore speed to a faster speed while it is in progress. For more
+// information, see Upgrading the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html)
+// in the Amazon S3 User Guide. To get the status of object restoration, you can
+// send a HEAD request. Operations return the x-amz-restore header, which provides
+// information about the restoration status, in the response. You can use Amazon S3
+// event notifications to notify you when a restore is initiated or completed. For
+// more information, see Configuring Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// in the Amazon S3 User Guide. After restoring an archived object, you can update
+// the restoration period by reissuing the request with a new period. Amazon S3
+// updates the restoration period relative to the current time and charges only for
+// the request; there are no data transfer charges. You cannot update the
+// restoration period when Amazon S3 is actively processing your current restore
+// request for the object. If your bucket has a lifecycle configuration with a rule
+// that includes an expiration action, the object expiration overrides the life
+// span that you specify in a restore request. For example, if you restore an
+// object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon
+// S3 deletes the object in 3 days. For more information about lifecycle
+// configuration, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide. Responses A successful action returns either the 200 OK
+// or 202 Accepted status code.
+//
+// - If the object is not previously restored, then Amazon S3 returns 202
+// Accepted in the response.
+//
+// - If the object is previously restored, Amazon S3 returns 200 OK in the
+// response.
+// +// - Special errors: +// +// - Code: RestoreAlreadyInProgress +// +// - Cause: Object restore is already in progress. +// +// - HTTP Status Code: 409 Conflict +// +// - SOAP Fault Code Prefix: Client +// +// - Code: GlacierExpeditedRetrievalNotAvailable +// +// - Cause: expedited retrievals are currently not available. Try again later. +// (Returned if there is insufficient capacity to process the Expedited request. +// This error applies only to Expedited retrievals and not to S3 Standard or Bulk +// retrievals.) +// +// - HTTP Status Code: 503 +// +// - SOAP Fault Code Prefix: N/A +// +// The following operations are related to RestoreObject : +// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) { + if params == nil { + params = &RestoreObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RestoreObject", params, optFns, c.addOperationRestoreObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RestoreObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RestoreObjectInput struct { + + // The bucket name containing the object to restore. Access points - When you use + // this action with an access point, you must provide the alias of the access point + // in place of the bucket name or specify the access point ARN. When using the + // access point ARN, you must direct requests to the access point hostname. The + // access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + // The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the action was initiated. + // + // This member is required. + Key *string + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. 
If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Container for restore job parameters. + RestoreRequest *types.RestoreRequest + + // VersionId used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + +type RestoreObjectOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Indicates the path in the provided S3 output location where Select results will + // be restored to. + RestoreOutputPath *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpRestoreObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpRestoreObject{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "RestoreObject"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = 
addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpRestoreObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRestoreObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRestoreObjectInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addRestoreObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *RestoreObjectInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opRestoreObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "RestoreObject",
+ }
+}
+
+// getRestoreObjectRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getRestoreObjectRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*RestoreObjectInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addRestoreObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getRestoreObjectRequestAlgorithmMember,
+ RequireChecksum: false,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getRestoreObjectBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getRestoreObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*RestoreObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addRestoreObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getRestoreObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go
new file mode 100644
index 000000000..f69db696a
--- /dev/null
+++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go @@ -0,0 +1,422 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithysync "github.com/aws/smithy-go/sync" + "sync" +) + +// This operation is not supported by directory buckets. This action filters the +// contents of an Amazon S3 object based on a simple structured query language +// (SQL) statement. In the request, along with the SQL expression, you must also +// specify a data serialization format (JSON, CSV, or Apache Parquet) of the +// object. Amazon S3 uses this format to parse object data into records, and +// returns only records that match the specified SQL expression. You must also +// specify the data serialization format for the response. This functionality is +// not supported for Amazon S3 on Outposts. For more information about Amazon S3 +// Select, see Selecting Content from Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) +// and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) +// in the Amazon S3 User Guide. Permissions You must have the s3:GetObject +// permission for this operation. Amazon S3 Select does not support anonymous +// access. For more information about permissions, see Specifying Permissions in a +// Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to +// query objects that have the following format properties: +// - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. +// - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. +// GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports +// for CSV and JSON files. Amazon S3 Select supports columnar compression for +// Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object +// compression for Parquet objects. +// - Server-side encryption - Amazon S3 Select supports querying objects that +// are protected with server-side encryption. For objects that are encrypted with +// customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use +// the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// . For more information about SSE-C, see Server-Side Encryption (Using +// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 +// managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side +// encryption is handled transparently, so you don't need to specify anything. For +// more information about server-side encryption, including SSE-S3 and SSE-KMS, see +// Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon S3 User Guide. 
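+//
+// A hedged usage sketch (not part of the generated file; names are assumptions):
+// selecting rows from a CSV object and draining the Records events from the
+// response stream, where client is an *s3.Client, ctx a context.Context, and
+// the aws, types, and os packages are imported:
+//
+//	out, err := client.SelectObjectContent(ctx, &s3.SelectObjectContentInput{
+//		Bucket:              aws.String("example-bucket"), // assumed bucket
+//		Key:                 aws.String("data.csv"),       // assumed key
+//		Expression:          aws.String("SELECT * FROM S3Object s"),
+//		ExpressionType:      types.ExpressionTypeSql,
+//		InputSerialization:  &types.InputSerialization{CSV: &types.CSVInput{}},
+//		OutputSerialization: &types.OutputSerialization{CSV: &types.CSVOutput{}},
+//	})
+//	if err != nil {
+//		// handle the request failure
+//	}
+//	stream := out.GetStream()
+//	defer stream.Close()
+//	for event := range stream.Events() {
+//		if records, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
+//			os.Stdout.Write(records.Value.Payload) // raw CSV result bytes
+//		}
+//	}
+//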
+// +// Working with the Response Body Given the response size is unknown, Amazon S3 +// Select streams the response as a series of messages and includes a +// Transfer-Encoding header with chunked as its value in the response. For more +// information, see Appendix: SelectObjectContent Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html) +// . GetObject Support The SelectObjectContent action does not support the +// following GetObject functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// . +// - Range : Although you can specify a scan range for an Amazon S3 Select +// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) +// in the request parameters), you cannot specify the range of bytes of an object +// to return. +// - The GLACIER , DEEP_ARCHIVE , and REDUCED_REDUNDANCY storage classes, or the +// ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING +// storage class: You cannot query objects in the GLACIER , DEEP_ARCHIVE , or +// REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or +// DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For +// more information about storage classes, see Using Amazon S3 storage classes (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html) +// in the Amazon S3 User Guide. +// +// Special Errors For a list of special errors for this operation, see List of +// SELECT Object Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) +// The following operations are related to SelectObjectContent : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectContentInput, optFns ...func(*Options)) (*SelectObjectContentOutput, error) { + if params == nil { + params = &SelectObjectContentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "SelectObjectContent", params, optFns, c.addOperationSelectObjectContentMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*SelectObjectContentOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Request to filter the contents of an Amazon S3 object based on a simple +// Structured Query Language (SQL) statement. In the request, along with the SQL +// expression, you must specify a data serialization format (JSON or CSV) of the +// object. Amazon S3 uses this to parse object data into records. It returns only +// records that match the specified SQL expression. You must also specify the data +// serialization format for the response. For more information, see S3Select API +// Documentation (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html) +// . +type SelectObjectContentInput struct { + + // The S3 bucket. + // + // This member is required. + Bucket *string + + // The expression that is used to query the object. + // + // This member is required. 
+ Expression *string
+
+ // The type of the provided expression (for example, SQL).
+ //
+ // This member is required.
+ ExpressionType types.ExpressionType
+
+ // Describes the format of the data in the object that is being queried.
+ //
+ // This member is required.
+ InputSerialization *types.InputSerialization
+
+ // The object key.
+ //
+ // This member is required.
+ Key *string
+
+ // Describes the format of the data that you want Amazon S3 to return in response.
+ //
+ // This member is required.
+ OutputSerialization *types.OutputSerialization
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Specifies if periodic request progress information should be enabled.
+ RequestProgress *types.RequestProgress
+
+ // The server-side encryption (SSE) algorithm used to encrypt the object. This
+ // parameter is needed only when the object was created using a checksum algorithm.
+ // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+ // in the Amazon S3 User Guide.
+ SSECustomerAlgorithm *string
+
+ // The server-side encryption (SSE) customer managed key. This parameter is needed
+ // only when the object was created using a checksum algorithm. For more
+ // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+ // in the Amazon S3 User Guide.
+ SSECustomerKey *string
+
+ // The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ // needed only when the object was created using a checksum algorithm. For more
+ // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+ // in the Amazon S3 User Guide.
+ SSECustomerKeyMD5 *string
+
+ // Specifies the byte range of the object to get the records from. A record is
+ // processed when its first byte is contained by the range. This parameter is
+ // optional, but when specified, it must not be empty. See RFC 2616, Section
+ // 14.35.1 about how to specify the start and end of the range. ScanRange may be
+ // used in the following ways:
+ // - <scanrange><start>50</start><end>100</end></scanrange> - process only the
+ // records starting between the bytes 50 and 100 (inclusive, counting from zero)
+ // - <scanrange><start>50</start></scanrange> - process only the records
+ // starting after the byte 50
+ // - <scanrange><end>50</end></scanrange> - process only the records within the
+ // last 50 bytes of the file.
+ ScanRange *types.ScanRange
+
+ noSmithyDocumentSerde
+}
+
+func (in *SelectObjectContentInput) bindEndpointParams(p *EndpointParameters) {
+ p.Bucket = in.Bucket
+
+}
+
+type SelectObjectContentOutput struct {
+ eventStream *SelectObjectContentEventStream
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+// GetStream returns the type to interact with the event stream.
+func (o *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream {
+ return o.eventStream
+}
+
+func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpSelectObjectContent{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpSelectObjectContent{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "SelectObjectContent"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addEventStreamSelectObjectContentMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpSelectObjectContentValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSelectObjectContent(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addSelectObjectContentUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *SelectObjectContentInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opSelectObjectContent(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "SelectObjectContent",
+ }
+}
+
+// getSelectObjectContentBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getSelectObjectContentBucketMember(input interface{}) (*string, bool) {
+ in := input.(*SelectObjectContentInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addSelectObjectContentUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getSelectObjectContentBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent operation.
+//
+// For testing and mocking the event stream, this type should be initialized via
+// the NewSelectObjectContentEventStream constructor function, using the functional
+// options to pass in nested mock behavior.
+type SelectObjectContentEventStream struct {
+ // SelectObjectContentEventStreamReader is the EventStream reader for the
+ // SelectObjectContentEventStream events. This value is automatically set by the
+ // SDK when the API call is made. Use this member when unit testing your code with
+ // the SDK to mock out the EventStream Reader.
+ //
+ // Must not be nil.
+ Reader SelectObjectContentEventStreamReader
+
+ done chan struct{}
+ closeOnce sync.Once
+ err *smithysync.OnceErr
+}
+
+// NewSelectObjectContentEventStream initializes a SelectObjectContentEventStream.
+// This function should only be used for testing and mocking the SelectObjectContentEventStream
+// stream within your application.
+//
+// The Reader member must be set before reading events from the stream.
+func NewSelectObjectContentEventStream(optFns ...func(*SelectObjectContentEventStream)) *SelectObjectContentEventStream {
+ es := &SelectObjectContentEventStream{
+ done: make(chan struct{}),
+ err: smithysync.NewOnceErr(),
+ }
+ for _, fn := range optFns {
+ fn(es)
+ }
+ return es
+}
+
+// Events returns a channel to read events from.
+func (es *SelectObjectContentEventStream) Events() <-chan types.SelectObjectContentEventStream {
+ return es.Reader.Events()
+}
+
+// Close closes the stream. Close must be called when done using the stream API;
+// not calling Close may result in resource leaks.
+//
+// Closing will close the underlying EventStream writer and reader, and no more
+// events can be sent or received.
+func (es *SelectObjectContentEventStream) Close() error {
+ es.closeOnce.Do(es.safeClose)
+ return es.Err()
+}
+
+func (es *SelectObjectContentEventStream) safeClose() {
+ close(es.done)
+
+ es.Reader.Close()
+}
+
+// Err returns any error that occurred while reading or writing EventStream Events
+// from the service API's response. Returns nil if there were no errors.
+func (es *SelectObjectContentEventStream) Err() error {
+ if err := es.err.Err(); err != nil {
+ return err
+ }
+
+ if err := es.Reader.Err(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (es *SelectObjectContentEventStream) waitStreamClose() {
+ type errorSet interface {
+ ErrorSet() <-chan struct{}
+ }
+
+ var outputErrCh <-chan struct{}
+ if v, ok := es.Reader.(errorSet); ok {
+ outputErrCh = v.ErrorSet()
+ }
+ var outputClosedCh <-chan struct{}
+ if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok {
+ outputClosedCh = v.Closed()
+ }
+
+ select {
+ case <-es.done:
+ case <-outputErrCh:
+ es.err.SetError(es.Reader.Err())
+ es.Close()
+
+ case <-outputClosedCh:
+ if err := es.Reader.Err(); err != nil {
+ es.err.SetError(es.Reader.Err())
+ }
+ es.Close()
+
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go
new file mode 100644
index 000000000..ff7319794
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go
@@ -0,0 +1,544 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+)
+
+// Uploads a part in a multipart upload. In this operation, you provide new data
+// as a part of an object in your request. However, you have an option to specify
+// your existing Amazon S3 object as a data source for the part you are uploading.
+// To upload a part from an existing object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// operation. You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+// ) before you can upload any part. In response to your initiate request, Amazon
+// S3 returns an upload ID, a unique identifier that you must include in your
+// upload part request. Part numbers can be any number from 1 to 10,000, inclusive.
+// A part number uniquely identifies a part and also defines its position within
+// the object being created. If you upload a new part using the same part number
+// that was used with a previous part, the previously uploaded part is overwritten.
+// For information about maximum and minimum part sizes and other multipart upload
+// specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
+// in the Amazon S3 User Guide. After you initiate a multipart upload and upload
+// one or more parts, you must either complete or abort the multipart upload in
+// order to stop getting charged for storage of the uploaded parts. Only after you
+// either complete or abort the multipart upload does Amazon S3 free up the parts
+// storage and stop charging you for it. For more information on multipart
+// uploads, go to Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html)
+// in the Amazon S3 User Guide.
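+//
+// A hedged usage sketch (not part of the generated file; names are assumptions):
+// uploading part 1 of a multipart upload whose uploadID came from
+// CreateMultipartUpload, where client is an *s3.Client, ctx a context.Context,
+// partData a []byte, and the aws and bytes packages are imported:
+//
+//	part, err := client.UploadPart(ctx, &s3.UploadPartInput{
+//		Bucket:     aws.String("example-bucket"), // assumed bucket
+//		Key:        aws.String("big/object.bin"), // assumed key
+//		UploadId:   aws.String(uploadID),
+//		PartNumber: aws.Int32(1),
+//		Body:       bytes.NewReader(partData),
+//	})
+//	if err != nil {
+//		// handle the request failure
+//	}
+//	_ = part.ETag // collected and later passed to CompleteMultipartUpload
+//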
Directory buckets - For directory buckets, you +// must make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information on the permissions +// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Data integrity General purpose bucket - To ensure that data is not corrupted +// traversing the network, specify the Content-MD5 header in the upload part +// request. Amazon S3 checks the part data against the provided MD5 value. If they +// do not match, Amazon S3 returns an error. If the upload request is signed with +// Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 +// header as a checksum instead of Content-MD5 . For more information see +// Authenticating Requests: Using the Authorization Header (Amazon Web Services +// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html) +// . Directory buckets - MD5 is not supported by directory buckets. You can use +// checksum algorithms to check object integrity. Encryption +// - General purpose bucket - Server-side encryption is for data encryption at +// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts it when you access it. You have mutually exclusive options to +// protect data using server-side encryption in Amazon S3, depending on how you +// choose to manage the encryption keys. Specifically, the encryption key options +// are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and +// Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side +// encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally +// tell Amazon S3 to encrypt data at rest using server-side encryption with other +// key options. The option you use depends on whether you want to use KMS keys +// (SSE-KMS) or provide your own encryption key (SSE-C). 
Server-side encryption is +// supported by the S3 Multipart Upload operations. Unless you are using a +// customer-provided encryption key (SSE-C), you don't need to specify the +// encryption parameters in each UploadPart request. Instead, you only need to +// specify the server-side encryption parameters in the initial Initiate Multipart +// request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// . If you request server-side encryption using a customer-provided encryption key +// (SSE-C) in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following request headers. +// - x-amz-server-side-encryption-customer-algorithm +// - x-amz-server-side-encryption-customer-key +// - x-amz-server-side-encryption-customer-key-MD5 +// - Directory bucket - For directory buckets, only server-side encryption with +// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. +// +// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// in the Amazon S3 User Guide. Special errors +// - Error Code: NoSuchUpload +// - Description: The specified multipart upload does not exist. The upload ID +// might be invalid, or the multipart upload might have been aborted or completed. +// - HTTP Status Code: 404 Not Found +// - SOAP Fault Code Prefix: Client +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to UploadPart : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) { + if params == nil { + params = &UploadPartInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UploadPart", params, optFns, c.addOperationUploadPartMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UploadPartOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UploadPartInput struct { + + // The name of the bucket to which the multipart upload was initiated. Directory + // buckets - When you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. 
Access points - When you use this action with an
+ // access point, you must provide the alias of the access point in place of the
+ // bucket name or specify the access point ARN. When using the access point ARN,
+ // you must direct requests to the access point hostname. The access point hostname
+ // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the Amazon Web Services
+ // SDKs, you provide the access point ARN in place of the bucket name. For more
+ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide. Access points and Object Lambda access points are
+ // not supported by directory buckets. S3 on Outposts - When you use this action
+ // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
+ // hostname. The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
+ // use this action with S3 on Outposts through the Amazon Web Services SDKs, you
+ // provide the Outposts access point ARN in place of the bucket name. For more
+ // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // Part number of the part being uploaded. This is a positive integer between 1 and
+ // 10,000.
+ //
+ // This member is required.
+ PartNumber *int32
+
+ // Upload ID identifying the multipart upload whose part is being uploaded.
+ //
+ // This member is required.
+ UploadId *string
+
+ // Object data.
+ Body io.Reader
+
+ // Indicates the algorithm used to create the checksum for the object when you use
+ // the SDK. This header will not provide any additional functionality if you don't
+ // use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see
+ // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
+ // ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must
+ // be the same for all parts and it must match the checksum value supplied in the
+ // CreateMultipartUpload request.
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // base64-encoded, 32-bit CRC32C checksum of the object.
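+ // As a sketch (ctx, client, bucket, key, upload, and data are assumed from the
+ // surrounding multipart-upload code), letting the SDK compute and send a CRC32C
+ // trailing checksum for a part might look like:
+ //
+ //	part, err := client.UploadPart(ctx, &s3.UploadPartInput{
+ //		Bucket:            aws.String(bucket),
+ //		Key:               aws.String(key),
+ //		UploadId:          upload.UploadId,
+ //		PartNumber:        aws.Int32(1),
+ //		Body:              bytes.NewReader(data),
+ //		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32c,
+ //	})
+ //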
For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Size of the body in bytes. This parameter is useful when the size of the body + // cannot be determined automatically. + ContentLength *int64 + + // The base64-encoded 128-bit MD5 digest of the part data. This parameter is + // auto-populated when using the command from the CLI. This parameter is required + // if object lock parameters are specified. This functionality is not supported for + // directory buckets. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded; Amazon S3 does not store the encryption key. The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header . This must be the same + // encryption key specified in the initiate multipart upload request. This + // functionality is not supported for directory buckets. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. This functionality is not + // supported for directory buckets. 
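+ //
+ // For illustration, the three SSE-C values might be supplied together on each
+ // part along these lines (a sketch; encKey is an assumed 32-byte AES-256 key
+ // held by the caller, with crypto/md5 and encoding/base64 imported):
+ //
+ //	sum := md5.Sum(encKey)
+ //	part, err := client.UploadPart(ctx, &s3.UploadPartInput{
+ //		Bucket:               aws.String(bucket),
+ //		Key:                  aws.String(key),
+ //		UploadId:             upload.UploadId,
+ //		PartNumber:           aws.Int32(1),
+ //		Body:                 bytes.NewReader(data),
+ //		SSECustomerAlgorithm: aws.String("AES256"),
+ //		SSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(encKey)),
+ //		SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
+ //	})
+ //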
+ SSECustomerKeyMD5 *string + + noSmithyDocumentSerde +} + +func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + +type UploadPartOutput struct { + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. + BucketKeyEnabled *bool + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag for the uploaded object. + ETag *string + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. 
This functionality is not supported for directory + // buckets. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + ServerSideEncryption types.ServerSideEncryption + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPart{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPart{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UploadPart"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpUploadPartValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPart(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = add100Continue(stack, options); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addUploadPartInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addUploadPartUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = 
v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *UploadPartInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opUploadPart(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UploadPart",
+ }
+}
+
+// getUploadPartRequestAlgorithmMember gets the request checksum algorithm value
+// provided as input.
+func getUploadPartRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*UploadPartInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addUploadPartInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getUploadPartRequestAlgorithmMember,
+ RequireChecksum: false,
+ EnableTrailingChecksum: true,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getUploadPartBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getUploadPartBucketMember(input interface{}) (*string, bool) {
+ in := input.(*UploadPartInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addUploadPartUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getUploadPartBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+// PresignUploadPart is used to generate a presigned HTTP request, which contains
+// the presigned URL, the signed headers, and the HTTP method used.
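+//
+// A minimal usage sketch, assuming client is an *s3.Client, the multipart
+// upload already exists, and bucket, key, and upload come from the caller's
+// code:
+//
+//	presigner := s3.NewPresignClient(client)
+//	req, err := presigner.PresignUploadPart(ctx, &s3.UploadPartInput{
+//		Bucket:     aws.String(bucket),
+//		Key:        aws.String(key),
+//		UploadId:   upload.UploadId,
+//		PartNumber: aws.Int32(1),
+//	}, func(o *s3.PresignOptions) {
+//		o.Expires = 15 * time.Minute // presigned URL lifetime
+//	})
+//	if err == nil {
+//		fmt.Println(req.Method, req.URL) // hand these to whoever performs the upload
+//	}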
+func (c *PresignClient) PresignUploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &UploadPartInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "UploadPart", params, clientOptFns,
+ c.client.addOperationUploadPartMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ func(stack *middleware.Stack, options Options) error {
+ return awshttp.RemoveContentTypeHeader(stack)
+ },
+ addUploadPartPayloadAsUnsigned,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
+
+func addUploadPartPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
+ v4.RemoveContentSHA256HeaderMiddleware(stack)
+ v4.RemoveComputePayloadSHA256Middleware(stack)
+ return v4.AddUnsignedPayloadMiddleware(stack)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go
new file mode 100644
index 000000000..d42dc60cd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go
@@ -0,0 +1,494 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Uploads a part by copying data from an existing object as the data source. To
+// specify the data source, you add the request header x-amz-copy-source in your
+// request. To specify a byte range, you add the request header
+// x-amz-copy-source-range in your request. For information about maximum and
+// minimum part sizes and other multipart upload specifications, see Multipart
+// upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
+// in the Amazon S3 User Guide. Instead of copying data from an existing object as
+// part data, you might use the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+// action to upload new data as a part of an object in your request. You must
+// initiate a multipart upload before you can upload any part. In response to your
+// initiate request, Amazon S3 returns the upload ID, a unique identifier that you
+// must include in your upload part request. For conceptual information about
+// multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
+// in the Amazon S3 User Guide. For information about copying objects using a
+// single atomic action vs. a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html)
+// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must
+// make requests for this API operation to the Zonal endpoint. These endpoints
+// support virtual-hosted-style requests in the format
+// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
+// requests are not supported.
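+//
+// As a non-normative sketch, copying the first 5 MiB of an existing object into
+// part 1 of an in-progress multipart upload could look like the following
+// (bucket, key, and upload are assumed from the caller's code):
+//
+//	part, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
+//		Bucket:          aws.String(bucket),               // destination bucket
+//		Key:             aws.String(key),                  // destination object key
+//		UploadId:        upload.UploadId,
+//		PartNumber:      aws.Int32(1),
+//		CopySource:      aws.String("src-bucket/src-key"), // URL-encoded bucket/key of the source
+//		CopySourceRange: aws.String("bytes=0-5242879"),    // first 5 MiB
+//	})
+//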
For more information, see Regional and Zonal
+// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide. Authentication and authorization All UploadPartCopy
+// requests must be authenticated and signed by using IAM credentials (access key
+// ID and secret access key for the IAM identities). All headers with the x-amz-
+// prefix, including x-amz-copy-source , must be signed. For more information, see
+// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
+// . Directory buckets - You must use IAM credentials to authenticate and authorize
+// your access to the UploadPartCopy API operation, instead of using the temporary
+// security credentials through the CreateSession API operation. The Amazon Web
+// Services CLI and SDKs handle authentication and authorization on your behalf.
+// Permissions You must have READ access to the source object and WRITE access to
+// the destination bucket.
+// - General purpose bucket permissions - You must have the permissions in a
+// policy based on the bucket types of your source bucket and destination bucket in
+// an UploadPartCopy operation.
+// - If the source object is in a general purpose bucket, you must have the
+// s3:GetObject permission to read the source object that is being copied.
+// - If the destination bucket is a general purpose bucket, you must have the
+// s3:PutObject permission to write the object copy to the destination bucket.
+// For information about permissions required to use the multipart upload API, see
+// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
+// - Directory bucket permissions - You must have permissions in a bucket policy
+// or an IAM identity-based policy based on the source and destination bucket types
+// in an UploadPartCopy operation.
+// - If the source object that you want to copy is in a directory bucket, you
+// must have the s3express:CreateSession permission in the Action element of a
+// policy to read the object . By default, the session is in the ReadWrite mode.
+// If you want to restrict access, you can explicitly set the
+// s3express:SessionMode condition key to ReadOnly on the copy source bucket.
+// - If the copy destination is a directory bucket, you must have the
+// s3express:CreateSession permission in the Action element of a policy to write
+// the object to the destination. The s3express:SessionMode condition key cannot
+// be set to ReadOnly on the copy destination. For example policies, see Example
+// bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
+// and Amazon Web Services Identity and Access Management (IAM) identity-based
+// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
+// in the Amazon S3 User Guide.
+//
+// Encryption
+// - General purpose buckets - For information about using server-side
+// encryption with customer-provided encryption keys with the UploadPartCopy
+// operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+// .
+// - Directory buckets - For directory buckets, only server-side encryption with +// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. +// +// Special errors +// - Error Code: NoSuchUpload +// - Description: The specified multipart upload does not exist. The upload ID +// might be invalid, or the multipart upload might have been aborted or completed. +// - HTTP Status Code: 404 Not Found +// - Error Code: InvalidRequest +// - Description: The specified copy source is not supported as a byte-range +// copy source. +// - HTTP Status Code: 400 Bad Request +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to UploadPartCopy : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns ...func(*Options)) (*UploadPartCopyOutput, error) { + if params == nil { + params = &UploadPartCopyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UploadPartCopy", params, optFns, c.addOperationUploadPartCopyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UploadPartCopyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UploadPartCopyInput struct { + + // The bucket name. Directory buckets - When you use this operation with a + // directory bucket, you must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. 
The S3 on Outposts hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
+ // use this action with S3 on Outposts through the Amazon Web Services SDKs, you
+ // provide the Outposts access point ARN in place of the bucket name. For more
+ // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specifies the source object for the copy operation. You specify the value in
+ // one of two formats, depending on whether you want to access the source object
+ // through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html)
+ // :
+ // - For objects not accessed through an access point, specify the name of the
+ // source bucket and key of the source object, separated by a slash (/). For
+ // example, to copy the object reports/january.pdf from the bucket
+ // awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value must
+ // be URL-encoded.
+ // - For objects accessed through access points, specify the Amazon Resource
+ // Name (ARN) of the object as accessed through the access point, in the format
+ // arn:aws:s3:::accesspoint//object/ . For example, to copy the object
+ // reports/january.pdf through access point my-access-point owned by account
+ // 123456789012 in Region us-west-2 , use the URL encoding of
+ // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
+ // . The value must be URL-encoded.
+ // - Amazon S3 supports copy operations using Access points only when the source
+ // and destination buckets are in the same Amazon Web Services Region.
+ // - Access points are not supported by directory buckets. Alternatively, for
+ // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as
+ // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example,
+ // to copy the object reports/january.pdf through outpost my-outpost owned by
+ // account 123456789012 in Region us-west-2 , use the URL encoding of
+ // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
+ // . The value must be URL-encoded.
+ // If your bucket has versioning enabled, you could have multiple versions of the
+ // same object. By default, x-amz-copy-source identifies the current version of
+ // the source object to copy. To copy a specific version of the source object,
+ // append ?versionId= to the x-amz-copy-source request header (for example,
+ // x-amz-copy-source:
+ // /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
+ // ). If the current version is a delete marker and you don't specify a versionId
+ // in the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found
+ // error, because the object does not exist. If you specify versionId in the
+ // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an
+ // HTTP 400 Bad Request error, because you are not allowed to specify a delete
+ // marker as a version for the x-amz-copy-source . Directory buckets - S3
+ // Versioning isn't enabled or supported for directory buckets.
+ //
+ // This member is required.
+ CopySource *string
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // Part number of the part being copied.
This is a positive integer between 1 and
+ // 10,000.
+ //
+ // This member is required.
+ PartNumber *int32
+
+ // Upload ID identifying the multipart upload whose part is being copied.
+ //
+ // This member is required.
+ UploadId *string
+
+ // Copies the object if its entity tag (ETag) matches the specified tag. If both
+ // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // headers are present in the request as follows: x-amz-copy-source-if-match
+ // condition evaluates to true, and x-amz-copy-source-if-unmodified-since
+ // condition evaluates to false; Amazon S3 returns 200 OK and copies the data.
+ CopySourceIfMatch *string
+
+ // Copies the object if it has been modified since the specified time. If both of
+ // the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+ // headers are present in the request as follows: x-amz-copy-source-if-none-match
+ // condition evaluates to false, and x-amz-copy-source-if-modified-since
+ // condition evaluates to true; Amazon S3 returns a 412 Precondition Failed
+ // response code.
+ CopySourceIfModifiedSince *time.Time
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag. If both of the x-amz-copy-source-if-none-match and
+ // x-amz-copy-source-if-modified-since headers are present in the request as
+ // follows: x-amz-copy-source-if-none-match condition evaluates to false, and
+ // x-amz-copy-source-if-modified-since condition evaluates to true; Amazon S3
+ // returns a 412 Precondition Failed response code.
+ CopySourceIfNoneMatch *string
+
+ // Copies the object if it hasn't been modified since the specified time. If both
+ // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // headers are present in the request as follows: x-amz-copy-source-if-match
+ // condition evaluates to true, and x-amz-copy-source-if-unmodified-since
+ // condition evaluates to false; Amazon S3 returns 200 OK and copies the data.
+ CopySourceIfUnmodifiedSince *time.Time
+
+ // The range of bytes to copy from the source object. The range value must use the
+ // form bytes=first-last, where the first and last are the zero-based byte offsets
+ // to copy. For example, bytes=0-9 indicates that you want to copy the first 10
+ // bytes of the source. You can copy a range only if the source object is greater
+ // than 5 MB.
+ CopySourceRange *string
+
+ // Specifies the algorithm to use when decrypting the source object (for example,
+ // AES256 ). This functionality is not supported when the source object is in a
+ // directory bucket.
+ CopySourceSSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one that
+ // was used when the source object was created. This functionality is not supported
+ // when the source object is in a directory bucket.
+ CopySourceSSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error. This functionality is not
+ // supported when the source object is in a directory bucket.
+ CopySourceSSECustomerKeyMD5 *string
+
+ // The account ID of the expected destination bucket owner.
If the account ID that + // you provide does not match the actual owner of the destination bucket, the + // request fails with the HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // The account ID of the expected source bucket owner. If the account ID that you + // provide does not match the actual owner of the source bucket, the request fails + // with the HTTP status code 403 Forbidden (access denied). + ExpectedSourceBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported when the destination bucket is a + // directory bucket. + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded; Amazon S3 does not store the encryption key. The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. This must be the same + // encryption key specified in the initiate multipart upload request. This + // functionality is not supported when the destination bucket is a directory + // bucket. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. This functionality is not + // supported when the destination bucket is a directory bucket. + SSECustomerKeyMD5 *string + + noSmithyDocumentSerde +} + +func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.DisableS3ExpressSessionAuth = ptr.Bool(true) +} + +type UploadPartCopyOutput struct { + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. + BucketKeyEnabled *bool + + // Container for all response elements. + CopyPartResult *types.CopyPartResult + + // The version of the source object that was copied, if you have enabled + // versioning on the source bucket. This functionality is not supported when the + // source object is in a directory bucket. + CopySourceVersionId *string + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. 
+ SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + ServerSideEncryption types.ServerSideEncryption + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPartCopy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPartCopy{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UploadPartCopy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addOpUploadPartCopyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPartCopy(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addUploadPartCopyUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil { + return err 
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *UploadPartCopyInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opUploadPartCopy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UploadPartCopy",
+ }
+}
+
+// getUploadPartCopyBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getUploadPartCopyBucketMember(input interface{}) (*string, bool) {
+ in := input.(*UploadPartCopyInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addUploadPartCopyUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getUploadPartCopyBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go
new file mode 100644
index 000000000..e181ab711
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go
@@ -0,0 +1,459 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "strings"
+ "time"
+)
+
+// This operation is not supported by directory buckets. Passes transformed
+// objects to a GetObject operation when using Object Lambda access points. For
+// information about Object Lambda access points, see Transforming objects with
+// Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html)
+// in the Amazon S3 User Guide. This operation supports metadata that can be
+// returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+// , in addition to RequestRoute , RequestToken , StatusCode , ErrorCode , and
+// ErrorMessage . The GetObject response metadata is supported so that the
+// WriteGetObjectResponse caller, typically a Lambda function, can provide the
+// same metadata when it internally invokes GetObject . When WriteGetObjectResponse
+// is called by a customer-owned Lambda function, the metadata returned to the end
+// user GetObject call might differ from what Amazon S3 would normally return.
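+//
+// For illustration, a transforming Lambda handler might write its result back
+// along these lines (a sketch; event is assumed to be the decoded S3 Object
+// Lambda invocation event, whose getObjectContext carries the outputRoute and
+// outputToken values, and transformed is the rewritten object body produced
+// earlier in the handler):
+//
+//	_, err := client.WriteGetObjectResponse(ctx, &s3.WriteGetObjectResponseInput{
+//		RequestRoute: aws.String(event.GetObjectContext.OutputRoute),
+//		RequestToken: aws.String(event.GetObjectContext.OutputToken),
+//		StatusCode:   aws.Int32(200),
+//		Body:         strings.NewReader(transformed),
+//	})
+//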
You
+// can include any number of metadata headers. When including a metadata header, it
+// should be prefaced with x-amz-meta . For example, x-amz-meta-my-custom-header:
+// MyCustomValue . The primary use case for this is to forward GetObject metadata.
+// Amazon Web Services provides some prebuilt Lambda functions that you can use
+// with S3 Object Lambda to detect and redact personally identifiable information
+// (PII) and decompress S3 objects. These Lambda functions are available in the
+// Amazon Web Services Serverless Application Repository, and can be selected
+// through the Amazon Web Services Management Console when you create your Object
+// Lambda access point. Example 1: PII Access Control - This Lambda function uses
+// Amazon Comprehend, a natural language processing (NLP) service using machine
+// learning to find insights and relationships in text. It automatically detects
+// personally identifiable information (PII) such as names, addresses, dates,
+// credit card numbers, and social security numbers from documents in your Amazon
+// S3 bucket. Example 2: PII Redaction - This Lambda function uses Amazon
+// Comprehend, a natural language processing (NLP) service using machine learning
+// to find insights and relationships in text. It automatically redacts personally
+// identifiable information (PII) such as names, addresses, dates, credit card
+// numbers, and social security numbers from documents in your Amazon S3 bucket.
+// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression is
+// equipped to decompress objects stored in S3 in one of six compressed file
+// formats including bzip2, gzip, snappy, zlib, zstandard, and ZIP. For information
+// on how to view and use these functions, see Using Amazon Web Services built
+// Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html)
+// in the Amazon S3 User Guide.
+func (c *Client) WriteGetObjectResponse(ctx context.Context, params *WriteGetObjectResponseInput, optFns ...func(*Options)) (*WriteGetObjectResponseOutput, error) {
+ if params == nil {
+ params = &WriteGetObjectResponseInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "WriteGetObjectResponse", params, optFns, c.addOperationWriteGetObjectResponseMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*WriteGetObjectResponseOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type WriteGetObjectResponseInput struct {
+
+ // Route prefix to the HTTP URL generated.
+ //
+ // This member is required.
+ RequestRoute *string
+
+ // A single use encrypted token that maps WriteGetObjectResponse to the end user
+ // GetObject request.
+ //
+ // This member is required.
+ RequestToken *string
+
+ // Indicates that a range of bytes was specified.
+ AcceptRanges *string
+
+ // The object data.
+ Body io.Reader
+
+ // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for
+ // server-side encryption with Amazon Web Services KMS (SSE-KMS).
+ BucketKeyEnabled *bool
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This specifies the
+ // base64-encoded, 32-bit CRC32 checksum of the object returned by the Object
+ // Lambda function. This may not match the checksum for the object stored in Amazon
+ // S3.
Amazon S3 will perform validation of the checksum values only when the + // original GetObject request required checksum validation. For more information + // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. Only one checksum header can be specified at a + // time. If you supply multiple checksum headers, this request will fail. + ChecksumCRC32 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the + // base64-encoded, 32-bit CRC32C checksum of the object returned by the Object + // Lambda function. This may not match the checksum for the object stored in Amazon + // S3. Amazon S3 will perform validation of the checksum values only when the + // original GetObject request required checksum validation. For more information + // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. Only one checksum header can be specified at a + // time. If you supply multiple checksum headers, this request will fail. + ChecksumCRC32C *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the + // base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda + // function. This may not match the checksum for the object stored in Amazon S3. + // Amazon S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. Only one checksum header can be specified at a + // time. If you supply multiple checksum headers, this request will fail. + ChecksumSHA1 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the + // base64-encoded, 256-bit SHA-256 digest of the object returned by the Object + // Lambda function. This may not match the checksum for the object stored in Amazon + // S3. Amazon S3 will perform validation of the checksum values only when the + // original GetObject request required checksum validation. For more information + // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. Only one checksum header can be specified at a + // time. If you supply multiple checksum headers, this request will fail. + ChecksumSHA256 *string + + // Specifies presentational information for the object. + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // The size of the content body in bytes. + ContentLength *int64 + + // The portion of the object returned in the response. + ContentRange *string + + // A standard MIME type describing the format of the object data. 
+ ContentType *string + + // Specifies whether an object stored in Amazon S3 is ( true ) or is not ( false ) + // a delete marker. + DeleteMarker *bool + + // An opaque identifier assigned by a web server to a specific version of a + // resource found at a URL. + ETag *string + + // A string that uniquely identifies an error condition. Returned in the tag of + // the error XML response for a corresponding GetObject call. Cannot be used with + // a successful StatusCode header or when the transformed object is provided in + // the body. All error codes from S3 are sentence-cased. The regular expression + // (regex) value is "^[A-Z][a-zA-Z]+$" . + ErrorCode *string + + // Contains a generic description of the error condition. Returned in the tag of + // the error XML response for a corresponding GetObject call. Cannot be used with + // a successful StatusCode header or when the transformed object is provided in + // body. + ErrorMessage *string + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key-value pairs + // that provide the object expiration information. The value of the rule-id is + // URL-encoded. + Expiration *string + + // The date and time at which the object is no longer cacheable. + Expires *time.Time + + // The date and time that the object was last modified. + LastModified *time.Time + + // A map of metadata to store with the object in S3. + Metadata map[string]string + + // Set to the number of metadata entries not returned in x-amz-meta headers. This + // can happen if you create metadata using an API like SOAP that supports more + // flexible metadata than the REST API. For example, using SOAP, you can create + // metadata whose values are not legal HTTP headers. + MissingMeta *int32 + + // Indicates whether an object stored in Amazon S3 has an active legal hold. + ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus + + // Indicates whether an object stored in Amazon S3 has Object Lock enabled. For + // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) + // . + ObjectLockMode types.ObjectLockMode + + // The date and time when Object Lock is configured to expire. + ObjectLockRetainUntilDate *time.Time + + // The count of parts this object has. + PartsCount *int32 + + // Indicates if request involves bucket that is either a source or destination in + // a Replication rule. For more information about S3 Replication, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html) + // . + ReplicationStatus types.ReplicationStatus + + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Provides information about object restoration operation and expiration time of + // the restored object copy. + Restore *string + + // Encryption algorithm used if server-side encryption with a customer-provided + // encryption key was specified for object stored in Amazon S3. + SSECustomerAlgorithm *string + + // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to + // encrypt data stored in S3. For more information, see Protecting data using + // server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) + // . 
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web
+ // Services Key Management Service (Amazon Web Services KMS) symmetric encryption
+ // customer managed key that was used for the object stored in Amazon S3.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing the requested object in
+ // Amazon S3 (for example, AES256, aws:kms ).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // The integer status code for an HTTP response of a corresponding GetObject
+ // request. The following is a list of status codes.
+ // - 200 - OK
+ // - 206 - Partial Content
+ // - 304 - Not Modified
+ // - 400 - Bad Request
+ // - 401 - Unauthorized
+ // - 403 - Forbidden
+ // - 404 - Not Found
+ // - 405 - Method Not Allowed
+ // - 409 - Conflict
+ // - 411 - Length Required
+ // - 412 - Precondition Failed
+ // - 416 - Range Not Satisfiable
+ // - 500 - Internal Server Error
+ // - 503 - Service Unavailable
+ StatusCode *int32
+
+ // Provides storage class information of the object. Amazon S3 returns this header
+ // for all objects except for S3 Standard storage class objects. For more
+ // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // .
+ StorageClass types.StorageClass
+
+ // The number of tags, if any, on the object.
+ TagCount *int32
+
+ // An ID used to reference a specific version of the object.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *WriteGetObjectResponseInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.UseObjectLambdaEndpoint = ptr.Bool(true)
+}
+
+type WriteGetObjectResponseOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpWriteGetObjectResponse{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpWriteGetObjectResponse{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "WriteGetObjectResponse"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addUnsignedPayload(stack); err != nil {
+ return err
+ }
+ if err = addContentSHA256Header(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack);
err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack); err != nil { + return err + } + if err = addOpWriteGetObjectResponseValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opWriteGetObjectResponse(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addWriteGetObjectResponseUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +type endpointPrefix_opWriteGetObjectResponseMiddleware struct { +} + +func (*endpointPrefix_opWriteGetObjectResponseMiddleware) ID() string { + return "EndpointHostPrefix" +} + +func (m *endpointPrefix_opWriteGetObjectResponseMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if smithyhttp.GetHostnameImmutable(ctx) || smithyhttp.IsEndpointHostPrefixDisabled(ctx) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + opaqueInput := getOperationInput(ctx) + input, ok := opaqueInput.(*WriteGetObjectResponseInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input type %T", opaqueInput) + } + + var prefix strings.Builder + if input.RequestRoute == nil { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("RequestRoute forms part of the endpoint host and so may not be nil")} + } else if !smithyhttp.ValidHostLabel(*input.RequestRoute) { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("RequestRoute forms part of the endpoint host and so must match \"[a-zA-Z0-9-]{1,63}\", but was \"%s\"", *input.RequestRoute)} + } else { + prefix.WriteString(*input.RequestRoute) + } + prefix.WriteString(".") + req.URL.Host = prefix.String() + req.URL.Host + + return next.HandleFinalize(ctx, in) +} +func addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&endpointPrefix_opWriteGetObjectResponseMiddleware{}, "ResolveEndpointV2", middleware.After) +} + +func newServiceMetadataMiddleware_opWriteGetObjectResponse(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "WriteGetObjectResponse", + } +} + +func addWriteGetObjectResponseUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: nopGetBucketAccessor, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: 
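// Hedged illustration of the host-prefix middleware above: WriteGetObjectResponse
// is called from an S3 Object Lambda handler, and the RequestRoute carried by the
// invocation event becomes the leftmost host label, turning an endpoint such as
// https://s3-object-lambda.us-east-1.amazonaws.com into
// https://io-abc123.s3-object-lambda.us-east-1.amazonaws.com before signing.
// A minimal caller sketch, assuming the aws-lambda-go events package and an
// already-transformed body reader named transformed:
//
//	func handler(ctx context.Context, event events.S3ObjectLambdaEvent) error {
//		_, err := client.WriteGetObjectResponse(ctx, &s3.WriteGetObjectResponseInput{
//			RequestRoute: &event.GetObjectContext.OutputRoute,
//			RequestToken: &event.GetObjectContext.OutputToken,
//			Body:         transformed,
//		})
//		return err
//	}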
options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: true, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go new file mode 100644 index 000000000..6ef631bd3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go @@ -0,0 +1,318 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { + params.Region = options.Region +} + +func bindAuthEndpointParams(params *AuthResolverParameters, input interface{}, options Options) { + params.endpointParams = bindEndpointParams(input, options) +} + +type setLegacyContextSigningOptionsMiddleware struct { +} + +func (*setLegacyContextSigningOptionsMiddleware) ID() string { + return "setLegacyContextSigningOptions" +} + +func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + schemeID := rscheme.Scheme.SchemeID() + + if sn := awsmiddleware.GetSigningName(ctx); sn != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) + } + } + + if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) + } + } + + return next.HandleFinalize(ctx, in) +} + +func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) +} + +type withAnonymous struct { + resolver AuthSchemeResolver +} + +var _ AuthSchemeResolver = (*withAnonymous)(nil) + +func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + opts, err := v.resolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + opts = append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }) + return opts, nil +} + +func wrapWithAnonymousAuth(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &withAnonymous{ + resolver: options.AuthSchemeResolver, + } +} + +// AuthResolverParameters contains the set of inputs necessary for auth scheme +// resolution. +type AuthResolverParameters struct { + // The name of the operation being invoked. + Operation string + + // The endpoint resolver parameters for this operation. This service's default + // resolver delegates to endpoint rules. 
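// Since AuthSchemeResolver and AuthResolverParameters are exported, a caller can
// wrap the default resolver through the client's functional options. A minimal
// sketch, assuming a hypothetical loggingResolver that only traces before
// delegating (note that wrapWithAnonymousAuth above augments only the built-in
// resolver, so replacing it also opts out of the implicit anonymous fallback):
//
//	type loggingResolver struct{ inner s3.AuthSchemeResolver }
//
//	func (r *loggingResolver) ResolveAuthSchemes(ctx context.Context, p *s3.AuthResolverParameters) ([]*smithyauth.Option, error) {
//		log.Printf("resolving auth schemes for %s in %s", p.Operation, p.Region)
//		return r.inner.ResolveAuthSchemes(ctx, p)
//	}
//
//	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
//		o.AuthSchemeResolver = &loggingResolver{inner: o.AuthSchemeResolver}
//	})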
+ endpointParams *EndpointParameters + + // The region in which the operation is being invoked. + Region string +} + +func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { + params := &AuthResolverParameters{ + Operation: operation, + } + + bindAuthEndpointParams(params, input, options) + bindAuthParamsRegion(params, input, options) + + return params +} + +// AuthSchemeResolver returns a set of possible authentication options for an +// operation. +type AuthSchemeResolver interface { + ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) +} + +type defaultAuthSchemeResolver struct{} + +var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) + +func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + if overrides, ok := operationAuthOptions[params.Operation]; ok { + return overrides(params), nil + } + return serviceAuthOptions(params), nil +} + +var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ + "WriteGetObjectResponse": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "s3") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + smithyhttp.SetIsUnsignedPayload(&props, true) + return props + }(), + }, + } + }, +} + +func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "s3") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + return props + }(), + }, + + { + SchemeID: smithyauth.SchemeIDSigV4A, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4ASigningName(&props, "s3") + smithyhttp.SetSigV4ASigningRegions(&props, []string{params.Region}) + return props + }(), + }, + } +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) + } + + scheme, ok := m.selectScheme(options) + if !ok { + return out, metadata, fmt.Errorf("could not select an auth scheme") + } + + ctx = setResolvedAuthScheme(ctx, scheme) + return next.HandleFinalize(ctx, in) +} + +func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { + for _, option := range options { + if option.SchemeID == smithyauth.SchemeIDAnonymous { + return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true + } + + for _, scheme := range m.options.AuthSchemes { + if scheme.SchemeID() != option.SchemeID { + continue + } + + if scheme.IdentityResolver(m.options) != nil { + return newResolvedAuthScheme(scheme, option), true + } + 
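// Hedged note on the selection order implemented here: an anonymous option
// short-circuits immediately; otherwise the first resolved option whose scheme
// has a usable identity resolver wins. Because SigV4 yields no identity
// resolver when credentials are absent, leaving Credentials unset is one way
// to fall through to the anonymous option appended by wrapWithAnonymousAuth,
// e.g. for public buckets:
//
//	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
//		o.Credentials = nil // SigV4 drops out; the anonymous scheme is selected
//	})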
} + } + + return nil, false +} + +type resolvedAuthSchemeKey struct{} + +type resolvedAuthScheme struct { + Scheme smithyhttp.AuthScheme + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { + return &resolvedAuthScheme{ + Scheme: scheme, + IdentityProperties: option.IdentityProperties, + SignerProperties: option.SignerProperties, + } +} + +func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { + return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) +} + +func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { + v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) + return v +} + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + resolver := rscheme.Scheme.IdentityResolver(m.options) + if resolver == nil { + return out, metadata, fmt.Errorf("no identity resolver") + } + + identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + if err != nil { + return out, metadata, fmt.Errorf("get identity: %w", err) + } + + ctx = setIdentity(ctx, identity) + return next.HandleFinalize(ctx, in) +} + +type identityKey struct{} + +func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { + return middleware.WithStackValue(ctx, identityKey{}, identity) +} + +func getIdentity(ctx context.Context) smithyauth.Identity { + v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) + return v +} + +type signRequestMiddleware struct { +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + identity := getIdentity(ctx) + if identity == nil { + return out, metadata, fmt.Errorf("no identity") + } + + signer := rscheme.Scheme.Signer() + if signer == nil { + return out, metadata, fmt.Errorf("no signer") + } + + if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + return out, metadata, fmt.Errorf("sign request: %w", err) + } + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go new file mode 100644 index 000000000..860af056a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go @@ -0,0 +1,47 @@ +package s3 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" +) + +// putBucketContextMiddleware stores the input bucket name within the 
request context (if +// present), which is required for a variety of custom S3 behaviors +type putBucketContextMiddleware struct{} + +func (*putBucketContextMiddleware) ID() string { + return "putBucketContext" +} + +func (m *putBucketContextMiddleware) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if bucket, ok := m.bucketFromInput(in.Parameters); ok { + ctx = customizations.SetBucket(ctx, bucket) + } + return next.HandleSerialize(ctx, in) +} + +func (m *putBucketContextMiddleware) bucketFromInput(params interface{}) (string, bool) { + v, ok := params.(bucketer) + if !ok { + return "", false + } + + return v.bucket() +} + +func addPutBucketContextMiddleware(stack *middleware.Stack) error { + // This is essentially a post-Initialize task; only run it once the input + // has received all modifications from that phase. Therefore we add it as + // an early Serialize step. + // + // FUTURE: it would be nice to have explicit phases that only we as SDK + // authors can hook into (such as the boundary between phases, which is + // where this really belongs) + return stack.Serialize.Add(&putBucketContextMiddleware{}, middleware.Before) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go new file mode 100644 index 000000000..4e7f7e24e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go @@ -0,0 +1,15 @@ +package s3 + +// bucketer is implemented by all S3 input structures +type bucketer interface { + bucket() (string, bool) +} + +func bucketFromInput(params interface{}) (string, bool) { + v, ok := params.(bucketer) + if !ok { + return "", false + } + + return v.bucket() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go new file mode 100644 index 000000000..5803b9e45 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go @@ -0,0 +1,36 @@ +package s3 + +import ( + "context" + "fmt" + + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/smithy-go/middleware" +) + +// backfills the checksum algorithm onto the context for CreateMultipartUpload so +// the transfer manager can set a checksum header on the request accordingly for +// s3express requests +type setCreateMPUChecksumAlgorithm struct{} + +func (*setCreateMPUChecksumAlgorithm) ID() string { + return "setCreateMPUChecksumAlgorithm" +} + +func (*setCreateMPUChecksumAlgorithm) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateMultipartUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unexpected input type %T", in.Parameters) + } + + ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(input.ChecksumAlgorithm)) + return next.HandleSerialize(ctx, in) +} + +func addSetCreateMPUChecksumAlgorithm(s *middleware.Stack) error { + return s.Serialize.Add(&setCreateMPUChecksumAlgorithm{}, middleware.Before) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go new file mode 100644 index 000000000..2be5df30f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go @@ -0,0
+1,22665 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream" + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi" + awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml" + "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "strconv" + "strings" +) + +type awsRestxml_deserializeOpAbortMultipartUpload struct { +} + +func (*awsRestxml_deserializeOpAbortMultipartUpload) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpAbortMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorAbortMultipartUpload(response, &metadata) + } + output := &AbortMultipartUploadOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorAbortMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchUpload", errorCode): + return awsRestxml_deserializeErrorNoSuchUpload(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(v *AbortMultipartUploadOutput, response 
*smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpCompleteMultipartUpload struct { +} + +func (*awsRestxml_deserializeOpCompleteMultipartUpload) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCompleteMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCompleteMultipartUpload(response, &metadata) + } + output := &CompleteMultipartUploadOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCompleteMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + 
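// A hedged reading of the error path repeated throughout this file: the body
// is buffered so it can be consumed twice, first by GetErrorResponseComponents
// (UseStatusCode lets S3 errors that arrive without an XML body still resolve
// to a code), then, after the Seek back to offset zero, by a modeled-error
// deserializer whenever the code matches one. Callers typically branch on the
// modeled types with errors.As:
//
//	var nsk *types.NoSuchKey
//	if errors.As(err, &nsk) {
//		// object absent; treat as a cache miss rather than a failure
//	}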
switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(v *CompleteMultipartUploadOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(v **CompleteMultipartUploadOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CompleteMultipartUploadOutput + if *v == nil { + sv = &CompleteMultipartUploadOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := 
string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("Location", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Location = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpCopyObject struct { +} + +func (*awsRestxml_deserializeOpCopyObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCopyObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCopyObject(response, &metadata) + } + output := &CopyObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentCopyObjectResult(&output.CopyObjectResult, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCopyObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, 
reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ObjectNotInActiveTierError", errorCode): + return awsRestxml_deserializeErrorObjectNotInActiveTierError(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(v *CopyObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.CopySourceVersionId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentCopyObjectOutput(v **CopyObjectOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CopyObjectOutput + if *v == nil { + sv = &CopyObjectOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CopyObjectResult", 
t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCopyObjectResult(&sv.CopyObjectResult, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpCreateBucket struct { +} + +func (*awsRestxml_deserializeOpCreateBucket) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCreateBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCreateBucket(response, &metadata) + } + output := &CreateBucketOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCreateBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("BucketAlreadyExists", errorCode): + return awsRestxml_deserializeErrorBucketAlreadyExists(response, errorBody) + + case strings.EqualFold("BucketAlreadyOwnedByYou", errorCode): + return awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(v *CreateBucketOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("Location"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Location = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpCreateMultipartUpload struct { 
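// Hedged note: the deserializers with XML payloads (CompleteMultipartUpload
// and CopyObject above, CreateMultipartUpload below) tee the body through a
// small ring buffer so a decode failure can report the last KiB read, in
// essence:
//
//	var buff [1024]byte
//	ring := smithyio.NewRingBuffer(buff[:])
//	body := io.TeeReader(response.Body, ring)
//	// ... XML-decode from body; on failure:
//	var snapshot bytes.Buffer
//	io.Copy(&snapshot, ring)
//	return &smithy.DeserializationError{Err: err, Snapshot: snapshot.Bytes()}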
+} + +func (*awsRestxml_deserializeOpCreateMultipartUpload) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCreateMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCreateMultipartUpload(response, &metadata) + } + output := &CreateMultipartUploadOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCreateMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(v *CreateMultipartUploadOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := 
response.Header.Values("x-amz-abort-date"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.AbortDate = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.AbortRuleId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("x-amz-checksum-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumAlgorithm = types.ChecksumAlgorithm(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(v **CreateMultipartUploadOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CreateMultipartUploadOutput + if *v == nil { + sv = &CreateMultipartUploadOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("UploadId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UploadId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the 
unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpCreateSession struct { +} + +func (*awsRestxml_deserializeOpCreateSession) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCreateSession(response, &metadata) + } + output := &CreateSessionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentCreateSessionOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCreateSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchBucket", errorCode): + return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentCreateSessionOutput(v **CreateSessionOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CreateSessionOutput + if *v == nil { + sv = 
&CreateSessionOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSessionCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpDeleteBucket struct { +} + +func (*awsRestxml_deserializeOpDeleteBucket) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucket(response, &metadata) + } + output := &DeleteBucketOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, 
in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response, &metadata) + } + output := &DeleteBucketAnalyticsConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketCors struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketCors) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketCors(response, &metadata) + } + output := &DeleteBucketCorsOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode 
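// Hedged usage note: the DeleteBucket* operations in this stretch model no
// specific error shapes, so any non-2xx response surfaces through the default
// branch as a *smithy.GenericAPIError; callers usually match on the code via
// the smithy.APIError interface rather than on concrete types:
//
//	var ae smithy.APIError
//	if errors.As(err, &ae) && ae.ErrorCode() == "AccessDenied" {
//		// surface a permissions hint instead of a generic failure
//	}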
+ + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketEncryption struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketEncryption) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketEncryption(response, &metadata) + } + output := &DeleteBucketEncryptionOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response, &metadata) + } + output := &DeleteBucketIntelligentTieringConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketInventoryConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response, &metadata) + } + output := &DeleteBucketInventoryConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketLifecycle struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketLifecycle) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketLifecycle) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response, &metadata) + } + output := &DeleteBucketLifecycleOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type 
awsRestxml_deserializeOpDeleteBucketMetricsConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response, &metadata) + } + output := &DeleteBucketMetricsConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketOwnershipControls struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketOwnershipControls) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response, &metadata) + } + output := &DeleteBucketOwnershipControlsOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketPolicy struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketPolicy(response, &metadata) + } + output := &DeleteBucketPolicyOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = 
errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketReplication struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketReplication) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketReplication(response, &metadata) + } + output := &DeleteBucketReplicationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketTagging struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketTagging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketTagging(response, &metadata) + } + output := &DeleteBucketTaggingOutput{} + out.Result = 
output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketWebsite struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketWebsite) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketWebsite(response, &metadata) + } + output := &DeleteBucketWebsiteOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode 
= errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteObject struct { +} + +func (*awsRestxml_deserializeOpDeleteObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteObject(response, &metadata) + } + output := &DeleteObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(v *DeleteObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.DeleteMarker = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = 
ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpDeleteObjects struct { +} + +func (*awsRestxml_deserializeOpDeleteObjects) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteObjects(response, &metadata) + } + output := &DeleteObjectsOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentDeleteObjectsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteObjects(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(v *DeleteObjectsOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := 
response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentDeleteObjectsOutput(v **DeleteObjectsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DeleteObjectsOutput + if *v == nil { + sv = &DeleteObjectsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Deleted", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(&sv.Deleted, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Error", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentErrorsUnwrapped(&sv.Errors, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpDeleteObjectTagging struct { +} + +func (*awsRestxml_deserializeOpDeleteObjectTagging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteObjectTagging(response, &metadata) + } + output := &DeleteObjectTaggingOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } 
+ if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(v *DeleteObjectTaggingOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpDeletePublicAccessBlock struct { +} + +func (*awsRestxml_deserializeOpDeletePublicAccessBlock) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeletePublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response, &metadata) + } + output := &DeletePublicAccessBlockOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpGetBucketAccelerateConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketAccelerateConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, 
err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response, &metadata) + } + output := &GetBucketAccelerateConfigurationOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetBucketAccelerateConfigurationOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetBucketAccelerateConfigurationOutput(v *GetBucketAccelerateConfigurationOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(v 
**GetBucketAccelerateConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketAccelerateConfigurationOutput + if *v == nil { + sv = &GetBucketAccelerateConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.BucketAccelerateStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketAcl struct { +} + +func (*awsRestxml_deserializeOpGetBucketAcl) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketAcl(response, &metadata) + } + output := &GetBucketAclOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketAclOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + 
awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketAclOutput(v **GetBucketAclOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketAclOutput + if *v == nil { + sv = &GetBucketAclOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessControlList", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGrants(&sv.Grants, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response, &metadata) + } + output := &GetBucketAnalyticsConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentAnalyticsConfiguration(&output.AnalyticsConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketAnalyticsConfigurationOutput(v **GetBucketAnalyticsConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketAnalyticsConfigurationOutput + if *v == nil { + sv = &GetBucketAnalyticsConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AnalyticsConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&sv.AnalyticsConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketCors struct { +} + +func (*awsRestxml_deserializeOpGetBucketCors) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketCors(response, &metadata) + } + output := &GetBucketCorsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response 
body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketCorsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketCorsOutput(v **GetBucketCorsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketCorsOutput + if *v == nil { + sv = &GetBucketCorsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CORSRule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCORSRulesUnwrapped(&sv.CORSRules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketEncryption struct { +} + +func (*awsRestxml_deserializeOpGetBucketEncryption) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketEncryption(response, &metadata) + } + output := &GetBucketEncryptionOutput{} + out.Result = output + + 
var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&output.ServerSideEncryptionConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketEncryptionOutput(v **GetBucketEncryptionOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketEncryptionOutput + if *v == nil { + sv = &GetBucketEncryptionOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ServerSideEncryptionConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&sv.ServerSideEncryptionConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, 
next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response, &metadata) + } + output := &GetBucketIntelligentTieringConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&output.IntelligentTieringConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketIntelligentTieringConfigurationOutput(v **GetBucketIntelligentTieringConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketIntelligentTieringConfigurationOutput + if *v == nil { + sv = &GetBucketIntelligentTieringConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("IntelligentTieringConfiguration", 
+		case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&sv.IntelligentTieringConfiguration, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpGetBucketInventoryConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketInventoryConfiguration) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response, &metadata)
+	}
+	output := &GetBucketInventoryConfigurationOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentInventoryConfiguration(&output.InventoryConfiguration, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketInventoryConfigurationOutput(v **GetBucketInventoryConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketInventoryConfigurationOutput
+	if *v == nil {
+		sv = &GetBucketInventoryConfigurationOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("InventoryConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentInventoryConfiguration(&sv.InventoryConfiguration, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpGetBucketLifecycleConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketLifecycleConfiguration) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response, &metadata)
+	}
+	output := &GetBucketLifecycleConfigurationOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(v **GetBucketLifecycleConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketLifecycleConfigurationOutput
+	if *v == nil {
+		sv = &GetBucketLifecycleConfigurationOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Rule", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentLifecycleRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpGetBucketLocation struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketLocation) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketLocation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata)
+	}
+	output := &GetBucketLocationOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketLocation(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketLocationOutput(v **GetBucketLocationOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketLocationOutput
+	if *v == nil {
+		sv = &GetBucketLocationOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("LocationConstraint", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.LocationConstraint = types.BucketLocationConstraint(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpGetBucketLogging struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketLogging) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketLogging(response, &metadata)
+	}
+	output := &GetBucketLoggingOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
%w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(v **GetBucketLoggingOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketLoggingOutput + if *v == nil { + sv = &GetBucketLoggingOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("LoggingEnabled", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLoggingEnabled(&sv.LoggingEnabled, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketMetricsConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketMetricsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response, &metadata) + } + output 
:= &GetBucketMetricsConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentMetricsConfiguration(&output.MetricsConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **GetBucketMetricsConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketMetricsConfigurationOutput + if *v == nil { + sv = &GetBucketMetricsConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("MetricsConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMetricsConfiguration(&sv.MetricsConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketNotificationConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketNotificationConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, 
+func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response, &metadata)
+	}
+	output := &GetBucketNotificationConfigurationOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(v **GetBucketNotificationConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketNotificationConfigurationOutput
+	if *v == nil {
+		sv = &GetBucketNotificationConfigurationOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("EventBridgeConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentEventBridgeConfiguration(&sv.EventBridgeConfiguration, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("CloudFunctionConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(&sv.LambdaFunctionConfigurations, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("QueueConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentQueueConfigurationListUnwrapped(&sv.QueueConfigurations, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("TopicConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(&sv.TopicConfigurations, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpGetBucketOwnershipControls struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketOwnershipControls) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response, &metadata)
+	}
+	output := &GetBucketOwnershipControlsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentOwnershipControls(&output.OwnershipControls, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketOwnershipControlsOutput(v **GetBucketOwnershipControlsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketOwnershipControlsOutput
+	if *v == nil {
+		sv = &GetBucketOwnershipControlsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("OwnershipControls", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentOwnershipControls(&sv.OwnershipControls, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpGetBucketPolicy struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketPolicy) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicy(response, &metadata)
+	}
+	output := &GetBucketPolicyOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(output, response.Body, response.ContentLength)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(v *GetBucketPolicyOutput, body io.ReadCloser, contentLength int64) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization of nil %T", v)
+	}
+	var buf bytes.Buffer
+	if contentLength > 0 {
+		buf.Grow(int(contentLength))
+	} else {
+		buf.Grow(512)
+	}
+
+	_, err := buf.ReadFrom(body)
+	if err != nil {
+		return err
+	}
+	if buf.Len() > 0 {
+		v.Policy = ptr.String(buf.String())
+	}
+	return nil
+}
+
+type awsRestxml_deserializeOpGetBucketPolicyStatus struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketPolicyStatus) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketPolicyStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response, &metadata)
+	}
+	output := &GetBucketPolicyStatusOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentPolicyStatus(&output.PolicyStatus, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketPolicyStatusOutput(v **GetBucketPolicyStatusOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketPolicyStatusOutput
+	if *v == nil {
+		sv = &GetBucketPolicyStatusOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("PolicyStatus", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentPolicyStatus(&sv.PolicyStatus, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpGetBucketReplication struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketReplication) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketReplication(response, &metadata)
+	}
+	output := &GetBucketReplicationOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentReplicationConfiguration(&output.ReplicationConfiguration, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketReplicationOutput(v **GetBucketReplicationOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketReplicationOutput
+	if *v == nil {
+		sv = &GetBucketReplicationOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("ReplicationConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentReplicationConfiguration(&sv.ReplicationConfiguration, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpGetBucketRequestPayment struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketRequestPayment) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketRequestPayment(response, &metadata)
+	}
+	output := &GetBucketRequestPaymentOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(v **GetBucketRequestPaymentOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketRequestPaymentOutput
+	if *v == nil {
+		sv = &GetBucketRequestPaymentOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Payer", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Payer = types.Payer(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpGetBucketTagging struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketTagging) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketTagging(response, &metadata)
+	}
+	output := &GetBucketTaggingOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(v **GetBucketTaggingOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketTaggingOutput
+	if *v == nil {
+		sv = &GetBucketTaggingOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("TagSet", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentTagSet(&sv.TagSet, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpGetBucketVersioning struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketVersioning) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketVersioning(response, &metadata)
+	}
+	output := &GetBucketVersioningOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentGetBucketVersioningOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketVersioning(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketVersioningOutput(v **GetBucketVersioningOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetBucketVersioningOutput
+	if *v == nil {
+		sv = &GetBucketVersioningOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("MfaDelete", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.MFADelete = types.MFADeleteStatus(xtv)
+			}
+
+		case strings.EqualFold("Status", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Status = types.BucketVersioningStatus(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpGetBucketWebsite struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketWebsite) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetBucketWebsite(response, &metadata)
+	}
+	output := &GetBucketWebsiteOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentGetBucketWebsiteOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code: errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketWebsiteOutput(v **GetBucketWebsiteOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketWebsiteOutput + if *v == nil { + sv = &GetBucketWebsiteOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ErrorDocument", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentErrorDocument(&sv.ErrorDocument, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("IndexDocument", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentIndexDocument(&sv.IndexDocument, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("RedirectAllRequestsTo", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentRedirectAllRequestsTo(&sv.RedirectAllRequestsTo, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("RoutingRules", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentRoutingRules(&sv.RoutingRules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObject struct { +} + +func (*awsRestxml_deserializeOpGetObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObject(response, &metadata) + } + output := &GetObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + err = awsRestxml_deserializeOpDocumentGetObjectOutput(output, response.Body) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { 
+ return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("InvalidObjectState", errorCode): + return awsRestxml_deserializeErrorInvalidObjectState(response, errorBody) + + case strings.EqualFold("NoSuchKey", errorCode): + return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("accept-ranges"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.AcceptRanges = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.CacheControl = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32C = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA1 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA256 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentDisposition = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Encoding"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentEncoding = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Language"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentLanguage = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Length"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 64) + if err != nil { + return err + } + v.ContentLength = ptr.Int64(vv) + } + + if headerValues := response.Header.Values("Content-Range"); len(headerValues) != 0 { + headerValues[0] = 
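// Usage sketch (illustrative, not part of the generated file): the error
// switch above maps S3's coded XML errors ("NoSuchKey", "InvalidObjectState")
// onto typed values, so a caller can branch with errors.As. Client, bucket,
// and key are placeholder assumptions. Assumed imports: "context", "errors",
// "github.com/aws/aws-sdk-go-v2/aws",
// "github.com/aws/aws-sdk-go-v2/service/s3",
// "github.com/aws/aws-sdk-go-v2/service/s3/types".
func getObjectIfExists(ctx context.Context, client *s3.Client, bucket, key string) (*s3.GetObjectOutput, error) {
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	var nsk *types.NoSuchKey
	if errors.As(err, &nsk) {
		// Produced by the "NoSuchKey" case of the error switch above.
		return nil, nil
	}
	var ios *types.InvalidObjectState
	if errors.As(err, &ios) {
		// Object is archived (e.g. in Glacier) and not currently readable.
		return nil, err
	}
	return out, err
}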
strings.TrimSpace(headerValues[0]) + v.ContentRange = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentType = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.DeleteMarker = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ETag = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.Expires = ptr.Time(t) + } + + if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.LastModified = ptr.Time(t) + } + + for headerKey, headerValues := range response.Header { + if lenPrefix := len("x-amz-meta-"); len(headerKey) >= lenPrefix && strings.EqualFold(headerKey[:lenPrefix], "x-amz-meta-") { + if v.Metadata == nil { + v.Metadata = map[string]string{} + } + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Metadata[strings.ToLower(headerKey[lenPrefix:])] = headerValues[0] + } + } + + if headerValues := response.Header.Values("x-amz-missing-meta"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.MissingMeta = ptr.Int32(int32(vv)) + } + + if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-object-lock-mode"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ObjectLockMode = types.ObjectLockMode(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-object-lock-retain-until-date"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseDateTime(headerValues[0]) + if err != nil { + return err + } + v.ObjectLockRetainUntilDate = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-mp-parts-count"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.PartsCount = ptr.Int32(int32(vv)) + } + + if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ReplicationStatus = types.ReplicationStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = 
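// Note on the x-amz-meta-* loop above: user metadata keys are stored
// lowercased, so lookups against GetObjectOutput.Metadata must use lowercase
// keys regardless of how the metadata was written. Sketch (function name is
// an assumption); assumed import: "strings".
func userMetadata(out *s3.GetObjectOutput, key string) (string, bool) {
	// e.g. an object uploaded with "X-Amz-Meta-Build-Id" arrives here
	// as Metadata["build-id"].
	v, ok := out.Metadata[strings.ToLower(key)]
	return v, ok
}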
types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-restore"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Restore = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-storage-class"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.StorageClass = types.StorageClass(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-tagging-count"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.TagCount = ptr.Int32(int32(vv)) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-website-redirect-location"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.WebsiteRedirectLocation = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectOutput(v *GetObjectOutput, body io.ReadCloser) error { + if v == nil { + return fmt.Errorf("unsupported deserialization of nil %T", v) + } + v.Body = body + return nil +} + +type awsRestxml_deserializeOpGetObjectAcl struct { +} + +func (*awsRestxml_deserializeOpGetObjectAcl) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectAcl(response, &metadata) + } + output := &GetObjectAclOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectAclOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, 
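// Because awsRestxml_deserializeOpDocumentGetObjectOutput above assigns the
// raw response.Body straight to the output (no buffering), the caller owns
// the stream and must Close it to release the underlying connection.
// Sketch; assumed import: "io".
func saveBody(out *s3.GetObjectOutput, w io.Writer) (int64, error) {
	defer out.Body.Close() // caller-owned stream, set by the payload deserializer
	return io.Copy(w, out.Body)
}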
ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetObjectAclOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchKey", errorCode): + return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectAclOutput(v *GetObjectAclOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectAclOutput(v **GetObjectAclOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectAclOutput + if *v == nil { + sv = &GetObjectAclOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessControlList", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGrants(&sv.Grants, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore 
the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectAttributes struct { +} + +func (*awsRestxml_deserializeOpGetObjectAttributes) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectAttributes(response, &metadata) + } + output := &GetObjectAttributesOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectAttributesOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetObjectAttributesOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchKey", errorCode): + return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, 
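// The "AccessControlList" element decoded above lands in
// GetObjectAclOutput.Grants. Consuming it looks roughly like this (sketch;
// grantee fields are pointers, hence aws.ToString); assumed import: "fmt".
func dumpGrants(out *s3.GetObjectAclOutput) {
	for _, g := range out.Grants {
		if g.Grantee != nil {
			fmt.Printf("%s -> %s\n", aws.ToString(g.Grantee.ID), string(g.Permission))
		}
	}
}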
+ } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectAttributesOutput(v *GetObjectAttributesOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.DeleteMarker = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.LastModified = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectAttributesOutput(v **GetObjectAttributesOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectAttributesOutput + if *v == nil { + sv = &GetObjectAttributesOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Checksum", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentChecksum(&sv.Checksum, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("ObjectParts", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGetObjectAttributesParts(&sv.ObjectParts, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ObjectSize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ObjectSize = ptr.Int64(i64) + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectLegalHold struct { +} + +func (*awsRestxml_deserializeOpGetObjectLegalHold) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectLegalHold) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) 
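// The Checksum/ETag/ObjectParts/ObjectSize/StorageClass cases above are only
// populated when the caller explicitly requests those attributes. Sketch of
// a request that exercises the ObjectSize and ETag paths (bucket/key are
// placeholders):
func objectSize(ctx context.Context, client *s3.Client, bucket, key string) (int64, error) {
	out, err := client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		ObjectAttributes: []types.ObjectAttributes{
			types.ObjectAttributesObjectSize,
			types.ObjectAttributesEtag,
		},
	})
	if err != nil {
		return 0, err
	}
	return aws.ToInt64(out.ObjectSize), nil
}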
{ + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectLegalHold(response, &metadata) + } + output := &GetObjectLegalHoldOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentObjectLockLegalHold(&output.LegalHold, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectLegalHold(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetObjectLegalHoldOutput(v **GetObjectLegalHoldOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectLegalHoldOutput + if *v == nil { + sv = &GetObjectLegalHoldOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("LegalHold", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectLockLegalHold(&sv.LegalHold, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + 
decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectLockConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetObjectLockConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectLockConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectLockConfiguration(response, &metadata) + } + output := &GetObjectLockConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentObjectLockConfiguration(&output.ObjectLockConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectLockConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetObjectLockConfigurationOutput(v **GetObjectLockConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectLockConfigurationOutput + if *v == nil { + sv = &GetObjectLockConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, 
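// Note: for GetObjectLockConfiguration the XML root element is itself the
// ObjectLockConfiguration document, so HandleDeserialize above decodes the
// root straight into output.ObjectLockConfiguration. Reading the decoded
// default retention rule might look like this (sketch; assumes Days is a
// *int32 in this SDK version):
func defaultRetention(out *s3.GetObjectLockConfigurationOutput) (types.ObjectLockRetentionMode, int32) {
	cfg := out.ObjectLockConfiguration
	if cfg == nil || cfg.Rule == nil || cfg.Rule.DefaultRetention == nil {
		return "", 0
	}
	return cfg.Rule.DefaultRetention.Mode, aws.ToInt32(cfg.Rule.DefaultRetention.Days)
}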
err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ObjectLockConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectLockConfiguration(&sv.ObjectLockConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectRetention struct { +} + +func (*awsRestxml_deserializeOpGetObjectRetention) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectRetention) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectRetention(response, &metadata) + } + output := &GetObjectRetentionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentObjectLockRetention(&output.Retention, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectRetention(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + 
errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetObjectRetentionOutput(v **GetObjectRetentionOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectRetentionOutput + if *v == nil { + sv = &GetObjectRetentionOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Retention", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectLockRetention(&sv.Retention, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectTagging struct { +} + +func (*awsRestxml_deserializeOpGetObjectTagging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectTagging(response, &metadata) + } + output := &GetObjectTaggingOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectTaggingOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetObjectTaggingOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := 
bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpHttpBindingsGetObjectTaggingOutput(v *GetObjectTaggingOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.VersionId = ptr.String(headerValues[0])
+	}
+
+	return nil
+}
+func awsRestxml_deserializeOpDocumentGetObjectTaggingOutput(v **GetObjectTaggingOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *GetObjectTaggingOutput
+	if *v == nil {
+		sv = &GetObjectTaggingOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("TagSet", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentTagSet(&sv.TagSet, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpGetObjectTorrent struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectTorrent) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectTorrent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorGetObjectTorrent(response, &metadata)
+	}
+	output := &GetObjectTorrentOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsGetObjectTorrentOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	err = awsRestxml_deserializeOpDocumentGetObjectTorrentOutput(output, response.Body)
+	if err != nil {
+		return out, metadata,
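// The "TagSet" case in awsRestxml_deserializeOpDocumentGetObjectTaggingOutput
// above yields []types.Tag with pointer fields. Flattening into a plain map
// (sketch; function name is an assumption):
func tagsAsMap(out *s3.GetObjectTaggingOutput) map[string]string {
	m := make(map[string]string, len(out.TagSet))
	for _, t := range out.TagSet {
		m[aws.ToString(t.Key)] = aws.ToString(t.Value)
	}
	return m
}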
&smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectTorrent(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectTorrentOutput(v *GetObjectTorrentOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectTorrentOutput(v *GetObjectTorrentOutput, body io.ReadCloser) error { + if v == nil { + return fmt.Errorf("unsupported deserialization of nil %T", v) + } + v.Body = body + return nil +} + +type awsRestxml_deserializeOpGetPublicAccessBlock struct { +} + +func (*awsRestxml_deserializeOpGetPublicAccessBlock) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetPublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetPublicAccessBlock(response, &metadata) + } + output := &GetPublicAccessBlockOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = 
awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(&output.PublicAccessBlockConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetPublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetPublicAccessBlockOutput(v **GetPublicAccessBlockOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetPublicAccessBlockOutput + if *v == nil { + sv = &GetPublicAccessBlockOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("PublicAccessBlockConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(&sv.PublicAccessBlockConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpHeadBucket struct { +} + +func (*awsRestxml_deserializeOpHeadBucket) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorHeadBucket(response, &metadata) + } + output := &HeadBucketOutput{} + out.Result = output + + err = 
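// PublicAccessBlockConfiguration fields are *bool in this SDK version, so a
// value the service omitted is distinguishable from an explicit false.
// Sketch of consuming the decoded configuration:
func fullyBlocked(out *s3.GetPublicAccessBlockOutput) bool {
	c := out.PublicAccessBlockConfiguration
	if c == nil {
		return false
	}
	return aws.ToBool(c.BlockPublicAcls) && aws.ToBool(c.IgnorePublicAcls) &&
		aws.ToBool(c.BlockPublicPolicy) && aws.ToBool(c.RestrictPublicBuckets)
}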
awsRestxml_deserializeOpHttpBindingsHeadBucketOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorHeadBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NotFound", errorCode): + return awsRestxml_deserializeErrorNotFound(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsHeadBucketOutput(v *HeadBucketOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-access-point-alias"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.AccessPointAlias = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("x-amz-bucket-location-name"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.BucketLocationName = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-bucket-location-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.BucketLocationType = types.LocationType(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-bucket-region"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.BucketRegion = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpHeadObject struct { +} + +func (*awsRestxml_deserializeOpHeadObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpHeadObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorHeadObject(response, 
&metadata) + } + output := &HeadObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorHeadObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NotFound", errorCode): + return awsRestxml_deserializeErrorNotFound(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("accept-ranges"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.AcceptRanges = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-archive-status"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ArchiveStatus = types.ArchiveStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.CacheControl = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32C = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA1 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + 
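// HEAD responses carry no XML body, which is why both HeadBucket and
// HeadObject error deserializers above rely on the UseStatusCode fallback in
// GetErrorResponseComponents: a 404 becomes the modeled "NotFound" error.
// Existence check built on that mapping (sketch):
func bucketExists(ctx context.Context, client *s3.Client, bucket string) (bool, error) {
	_, err := client.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: aws.String(bucket)})
	var nf *types.NotFound
	if errors.As(err, &nf) {
		return false, nil
	}
	return err == nil, err
}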
v.ChecksumSHA256 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentDisposition = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Encoding"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentEncoding = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Language"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentLanguage = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Length"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 64) + if err != nil { + return err + } + v.ContentLength = ptr.Int64(vv) + } + + if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentType = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.DeleteMarker = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ETag = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.Expires = ptr.Time(t) + } + + if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.LastModified = ptr.Time(t) + } + + for headerKey, headerValues := range response.Header { + if lenPrefix := len("x-amz-meta-"); len(headerKey) >= lenPrefix && strings.EqualFold(headerKey[:lenPrefix], "x-amz-meta-") { + if v.Metadata == nil { + v.Metadata = map[string]string{} + } + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Metadata[strings.ToLower(headerKey[lenPrefix:])] = headerValues[0] + } + } + + if headerValues := response.Header.Values("x-amz-missing-meta"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.MissingMeta = ptr.Int32(int32(vv)) + } + + if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-object-lock-mode"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ObjectLockMode = types.ObjectLockMode(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-object-lock-retain-until-date"); len(headerValues) != 0 { + headerValues[0] = 
strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseDateTime(headerValues[0]) + if err != nil { + return err + } + v.ObjectLockRetainUntilDate = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-mp-parts-count"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.PartsCount = ptr.Int32(int32(vv)) + } + + if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ReplicationStatus = types.ReplicationStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-restore"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Restore = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-storage-class"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.StorageClass = types.StorageClass(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-website-redirect-location"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.WebsiteRedirectLocation = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpListBucketAnalyticsConfigurations struct { +} + +func (*awsRestxml_deserializeOpListBucketAnalyticsConfigurations) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListBucketAnalyticsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, 
+type awsRestxml_deserializeOpListBucketAnalyticsConfigurations struct {
+}
+
+func (*awsRestxml_deserializeOpListBucketAnalyticsConfigurations) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListBucketAnalyticsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response, &metadata)
+	}
+	output := &ListBucketAnalyticsConfigurationsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(v **ListBucketAnalyticsConfigurationsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *ListBucketAnalyticsConfigurationsOutput
+	if *v == nil {
+		sv = &ListBucketAnalyticsConfigurationsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("AnalyticsConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(&sv.AnalyticsConfigurationList, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("ContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ContinuationToken = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("IsTruncated", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+				}
+				sv.IsTruncated = ptr.Bool(xtv)
+			}
+
+		case strings.EqualFold("NextContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NextContinuationToken = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations struct {
+}
+
+func (*awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response, &metadata)
+	}
+	output := &ListBucketIntelligentTieringConfigurationsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(v **ListBucketIntelligentTieringConfigurationsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *ListBucketIntelligentTieringConfigurationsOutput
+	if *v == nil {
+		sv = &ListBucketIntelligentTieringConfigurationsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("ContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ContinuationToken = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentIntelligentTieringConfigurationListUnwrapped(&sv.IntelligentTieringConfigurationList, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("IsTruncated", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+				}
+				sv.IsTruncated = ptr.Bool(xtv)
+			}
+
+		case strings.EqualFold("NextContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NextContinuationToken = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpListBucketInventoryConfigurations struct {
+}
+
+func (*awsRestxml_deserializeOpListBucketInventoryConfigurations) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListBucketInventoryConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response, &metadata)
+	}
+	output := &ListBucketInventoryConfigurationsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(v **ListBucketInventoryConfigurationsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *ListBucketInventoryConfigurationsOutput
+	if *v == nil {
+		sv = &ListBucketInventoryConfigurationsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("ContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ContinuationToken = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("InventoryConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentInventoryConfigurationListUnwrapped(&sv.InventoryConfigurationList, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("IsTruncated", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+				}
+				sv.IsTruncated = ptr.Bool(xtv)
+			}
+
+		case strings.EqualFold("NextContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NextContinuationToken = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpListBucketMetricsConfigurations struct {
+}
+
+func (*awsRestxml_deserializeOpListBucketMetricsConfigurations) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListBucketMetricsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response, &metadata)
+	}
+	output := &ListBucketMetricsConfigurationsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(v **ListBucketMetricsConfigurationsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *ListBucketMetricsConfigurationsOutput
+	if *v == nil {
+		sv = &ListBucketMetricsConfigurationsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("ContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ContinuationToken = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("IsTruncated", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+				}
+				sv.IsTruncated = ptr.Bool(xtv)
+			}
+
+		case strings.EqualFold("MetricsConfiguration", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentMetricsConfigurationListUnwrapped(&sv.MetricsConfigurationList, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("NextContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NextContinuationToken = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpListBuckets struct {
+}
+
+func (*awsRestxml_deserializeOpListBuckets) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListBuckets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListBuckets(response, &metadata)
+	}
+	output := &ListBucketsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListBucketsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListBuckets(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentListBucketsOutput(v **ListBucketsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *ListBucketsOutput
+	if *v == nil {
+		sv = &ListBucketsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Buckets", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentBuckets(&sv.Buckets, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Owner", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpListDirectoryBuckets struct {
+}
+
+func (*awsRestxml_deserializeOpListDirectoryBuckets) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListDirectoryBuckets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListDirectoryBuckets(response, &metadata)
+	}
+	output := &ListDirectoryBucketsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListDirectoryBucketsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListDirectoryBuckets(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpDocumentListDirectoryBucketsOutput(v **ListDirectoryBucketsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *ListDirectoryBucketsOutput
+	if *v == nil {
+		sv = &ListDirectoryBucketsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Buckets", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentBuckets(&sv.Buckets, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("ContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ContinuationToken = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpListMultipartUploads struct {
+}
+
+func (*awsRestxml_deserializeOpListMultipartUploads) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListMultipartUploads(response, &metadata)
+	}
+	output := &ListMultipartUploadsOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsListMultipartUploadsOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListMultipartUploads(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpHttpBindingsListMultipartUploadsOutput(v *ListMultipartUploadsOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	return nil
+}
+func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipartUploadsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *ListMultipartUploadsOutput
+	if *v == nil {
+		sv = &ListMultipartUploadsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Bucket", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Bucket = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("CommonPrefixes", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Delimiter", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Delimiter = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("EncodingType", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.EncodingType = types.EncodingType(xtv)
+			}
+
+		case strings.EqualFold("IsTruncated", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+				}
+				sv.IsTruncated = ptr.Bool(xtv)
+			}
+
+		case strings.EqualFold("KeyMarker", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.KeyMarker = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("MaxUploads", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.MaxUploads = ptr.Int32(int32(i64))
+			}
+
+		case strings.EqualFold("NextKeyMarker", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NextKeyMarker = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("NextUploadIdMarker", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NextUploadIdMarker = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Prefix", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Prefix = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("UploadIdMarker", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.UploadIdMarker = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Upload", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentMultipartUploadListUnwrapped(&sv.Uploads, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpListObjects struct {
+}
+
+func (*awsRestxml_deserializeOpListObjects) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListObjects(response, &metadata)
+	}
+	output := &ListObjectsOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsListObjectsOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListObjectsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListObjects(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	case strings.EqualFold("NoSuchBucket", errorCode):
+		return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpHttpBindingsListObjectsOutput(v *ListObjectsOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	return nil
+}
+func awsRestxml_deserializeOpDocumentListObjectsOutput(v **ListObjectsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *ListObjectsOutput
+	if *v == nil {
+		sv = &ListObjectsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("CommonPrefixes", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Contents", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Delimiter", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Delimiter = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("EncodingType", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.EncodingType = types.EncodingType(xtv)
+			}
+
+		case strings.EqualFold("IsTruncated", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+				}
+				sv.IsTruncated = ptr.Bool(xtv)
+			}
+
+		case strings.EqualFold("Marker", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Marker = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("MaxKeys", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.MaxKeys = ptr.Int32(int32(i64))
+			}
+
+		case strings.EqualFold("Name", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Name = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("NextMarker", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NextMarker = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Prefix", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Prefix = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
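+// ListObjectsV2 mirrors ListObjects but decodes the token-based pagination
+// fields (ContinuationToken, NextContinuationToken, KeyCount, StartAfter) in
+// place of Marker/NextMarker. These middlewares are never invoked directly;
+// they run inside a normal client call, roughly like the following sketch
+// (bucket name illustrative):
+//
+//	cfg, err := config.LoadDefaultConfig(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	client := s3.NewFromConfig(cfg)
+//	out, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
+//		Bucket: aws.String("example-bucket"),
+//	})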
+type awsRestxml_deserializeOpListObjectsV2 struct {
+}
+
+func (*awsRestxml_deserializeOpListObjectsV2) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListObjectsV2(response, &metadata)
+	}
+	output := &ListObjectsV2Output{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsListObjectsV2Output(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListObjectsV2Output(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListObjectsV2(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	case strings.EqualFold("NoSuchBucket", errorCode):
+		return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpHttpBindingsListObjectsV2Output(v *ListObjectsV2Output, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	return nil
+}
+func awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *ListObjectsV2Output
+	if *v == nil {
+		sv = &ListObjectsV2Output{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("CommonPrefixes", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Contents", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("ContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ContinuationToken = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Delimiter", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Delimiter = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("EncodingType", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.EncodingType = types.EncodingType(xtv)
+			}
+
+		case strings.EqualFold("IsTruncated", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+				}
+				sv.IsTruncated = ptr.Bool(xtv)
+			}
+
+		case strings.EqualFold("KeyCount", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.KeyCount = ptr.Int32(int32(i64))
+			}
+
+		case strings.EqualFold("MaxKeys", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.MaxKeys = ptr.Int32(int32(i64))
+			}
+
+		case strings.EqualFold("Name", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Name = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("NextContinuationToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NextContinuationToken = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Prefix", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Prefix = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("StartAfter", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.StartAfter = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpListObjectVersions struct {
+}
+
+func (*awsRestxml_deserializeOpListObjectVersions) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorListObjectVersions(response, &metadata)
+	}
+	output := &ListObjectVersionsOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsListObjectVersionsOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeOpDocumentListObjectVersionsOutput(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListObjectVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpHttpBindingsListObjectVersionsOutput(v *ListObjectVersionsOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	return nil
+}
+func awsRestxml_deserializeOpDocumentListObjectVersionsOutput(v **ListObjectVersionsOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *ListObjectVersionsOutput
+	if *v == nil {
+		sv = &ListObjectVersionsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("CommonPrefixes", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("DeleteMarker", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(&sv.DeleteMarkers, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Delimiter", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Delimiter = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("EncodingType", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.EncodingType = types.EncodingType(xtv)
+			}
+
+		case strings.EqualFold("IsTruncated", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+				}
+				sv.IsTruncated = ptr.Bool(xtv)
+			}
+
+		case strings.EqualFold("KeyMarker", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.KeyMarker = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("MaxKeys", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.MaxKeys = ptr.Int32(int32(i64))
+			}
+
+		case strings.EqualFold("Name", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Name = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("NextKeyMarker", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NextKeyMarker = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("NextVersionIdMarker", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.NextVersionIdMarker = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Prefix", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Prefix = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("VersionIdMarker", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.VersionIdMarker = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Version", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentObjectVersionListUnwrapped(&sv.Versions, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
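+// Unlike the other List* bindings above, which only read x-amz-request-charged,
+// ListParts also binds x-amz-abort-date (parsed as an HTTP date) and
+// x-amz-abort-rule-id before the part list is decoded from the XML body.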
genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsListPartsOutput(v *ListPartsOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-abort-date"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.AbortDate = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.AbortRuleId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListPartsOutput + if *v == nil { + sv = &ListPartsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) + } + + case strings.EqualFold("Initiator", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = ptr.Bool(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("MaxParts", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxParts = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("NextPartNumberMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextPartNumberMarker = ptr.String(xtv) + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PartNumberMarker", t.Name.Local): + val, err := 
decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PartNumberMarker = ptr.String(xtv) + } + + case strings.EqualFold("Part", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentPartsUnwrapped(&sv.Parts, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + case strings.EqualFold("UploadId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UploadId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpPutBucketAccelerateConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketAccelerateConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response, &metadata) + } + output := &PutBucketAccelerateConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketAcl struct { +} + +func 
(*awsRestxml_deserializeOpPutBucketAcl) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketAcl(response, &metadata) + } + output := &PutBucketAclOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response, &metadata) + } + output := &PutBucketAnalyticsConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func 
awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketCors struct { +} + +func (*awsRestxml_deserializeOpPutBucketCors) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketCors(response, &metadata) + } + output := &PutBucketCorsOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + 
return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketEncryption struct { +} + +func (*awsRestxml_deserializeOpPutBucketEncryption) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketEncryption(response, &metadata) + } + output := &PutBucketEncryptionOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response, &metadata) + } + output := &PutBucketIntelligentTieringConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketInventoryConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketInventoryConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response, &metadata) + } + output := &PutBucketInventoryConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = 
errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketLifecycleConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketLifecycleConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response, &metadata) + } + output := &PutBucketLifecycleConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketLogging struct { +} + +func (*awsRestxml_deserializeOpPutBucketLogging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, 
awsRestxml_deserializeOpErrorPutBucketLogging(response, &metadata) + } + output := &PutBucketLoggingOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketMetricsConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketMetricsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response, &metadata) + } + output := &PutBucketMetricsConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + 
s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketNotificationConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketNotificationConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response, &metadata) + } + output := &PutBucketNotificationConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketOwnershipControls struct { +} + +func (*awsRestxml_deserializeOpPutBucketOwnershipControls) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := 
out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response, &metadata) + } + output := &PutBucketOwnershipControlsOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketPolicy struct { +} + +func (*awsRestxml_deserializeOpPutBucketPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketPolicy(response, &metadata) + } + output := &PutBucketPolicyOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, 
s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketReplication struct { +} + +func (*awsRestxml_deserializeOpPutBucketReplication) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketReplication(response, &metadata) + } + output := &PutBucketReplicationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketRequestPayment struct { +} + +func (*awsRestxml_deserializeOpPutBucketRequestPayment) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = 
next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketRequestPayment(response, &metadata) + } + output := &PutBucketRequestPaymentOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketTagging struct { +} + +func (*awsRestxml_deserializeOpPutBucketTagging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketTagging(response, &metadata) + } + output := &PutBucketTaggingOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + 
errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketVersioning struct { +} + +func (*awsRestxml_deserializeOpPutBucketVersioning) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketVersioning(response, &metadata) + } + output := &PutBucketVersioningOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketVersioning(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketWebsite struct { +} + +func (*awsRestxml_deserializeOpPutBucketWebsite) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata 
middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketWebsite(response, &metadata) + } + output := &PutBucketWebsiteOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutObject struct { +} + +func (*awsRestxml_deserializeOpPutObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutObject(response, &metadata) + } + output := &PutObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsPutObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := 
"UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32C = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA1 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA256 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ETag = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { + 
headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpPutObjectAcl struct { +} + +func (*awsRestxml_deserializeOpPutObjectAcl) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutObjectAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutObjectAcl(response, &metadata) + } + output := &PutObjectAclOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutObjectAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchKey", errorCode): + return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(v *PutObjectAclOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} + +type 
awsRestxml_deserializeOpPutObjectLegalHold struct { +} + +func (*awsRestxml_deserializeOpPutObjectLegalHold) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutObjectLegalHold) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutObjectLegalHold(response, &metadata) + } + output := &PutObjectLegalHoldOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutObjectLegalHold(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(v *PutObjectLegalHoldOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpPutObjectLockConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutObjectLockConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutObjectLockConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: 
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response, &metadata)
+	}
+	output := &PutObjectLockConfigurationOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(v *PutObjectLockConfigurationOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	return nil
+}
+
+type awsRestxml_deserializeOpPutObjectRetention struct {
+}
+
+func (*awsRestxml_deserializeOpPutObjectRetention) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObjectRetention) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorPutObjectRetention(response, &metadata)
+	}
+	output := &PutObjectRetentionOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObjectRetention(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(v *PutObjectRetentionOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	return nil
+}
+
+type awsRestxml_deserializeOpPutObjectTagging struct {
+}
+
+func (*awsRestxml_deserializeOpPutObjectTagging) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorPutObjectTagging(response, &metadata)
+	}
+	output := &PutObjectTaggingOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(v *PutObjectTaggingOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.VersionId = ptr.String(headerValues[0])
+	}
+
+	return nil
+}
+
+type awsRestxml_deserializeOpPutPublicAccessBlock struct {
+}
+
+func (*awsRestxml_deserializeOpPutPublicAccessBlock) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutPublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorPutPublicAccessBlock(response, &metadata)
+	}
+	output := &PutPublicAccessBlockOutput{}
+	out.Result = output
+
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutPublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsRestxml_deserializeOpRestoreObject struct {
+}
+
+func (*awsRestxml_deserializeOpRestoreObject) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpRestoreObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorRestoreObject(response, &metadata)
+	}
+	output := &RestoreObjectOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorRestoreObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	case strings.EqualFold("ObjectAlreadyInActiveTierError", errorCode):
+		return awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(v *RestoreObjectOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-restore-output-path"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RestoreOutputPath = ptr.String(headerValues[0])
+	}
+
+	return nil
+}
+
+type awsRestxml_deserializeOpSelectObjectContent struct {
+}
+
+func (*awsRestxml_deserializeOpSelectObjectContent) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpSelectObjectContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorSelectObjectContent(response, &metadata)
+	}
+	output := &SelectObjectContentOutput{}
+	out.Result = output
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorSelectObjectContent(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsRestxml_deserializeOpUploadPart struct {
+}
+
+func (*awsRestxml_deserializeOpUploadPart) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpUploadPart) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorUploadPart(response, &metadata)
+	}
+	output := &UploadPartOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsUploadPartOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorUploadPart(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpHttpBindingsUploadPartOutput(v *UploadPartOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		vv, err := strconv.ParseBool(headerValues[0])
+		if err != nil {
+			return err
+		}
+		v.BucketKeyEnabled = ptr.Bool(vv)
+	}
+
+	if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ChecksumCRC32 = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ChecksumCRC32C = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ChecksumSHA1 = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ChecksumSHA256 = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ETag = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSEKMSKeyId = ptr.String(headerValues[0])
+	}
+
+	return nil
+}
+
+type awsRestxml_deserializeOpUploadPartCopy struct {
+}
+
+func (*awsRestxml_deserializeOpUploadPartCopy) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpUploadPartCopy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorUploadPartCopy(response, &metadata)
+	}
+	output := &UploadPartCopyOutput{}
+	out.Result = output
+
+	err = awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(output, response)
+	if err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(response.Body, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentCopyPartResult(&output.CopyPartResult, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return out, metadata, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorUploadPartCopy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(v *UploadPartCopyOutput, response *smithyhttp.Response) error {
+	if v == nil {
+		return fmt.Errorf("unsupported deserialization for nil %T", v)
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		vv, err := strconv.ParseBool(headerValues[0])
+		if err != nil {
+			return err
+		}
+		v.BucketKeyEnabled = ptr.Bool(vv)
+	}
+
+	if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.CopySourceVersionId = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.RequestCharged = types.RequestCharged(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+	}
+
+	if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+		headerValues[0] = strings.TrimSpace(headerValues[0])
+		v.SSEKMSKeyId = ptr.String(headerValues[0])
+	}
+
+	return nil
+}
+func awsRestxml_deserializeOpDocumentUploadPartCopyOutput(v **UploadPartCopyOutput, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *UploadPartCopyOutput
+	if *v == nil {
+		sv = &UploadPartCopyOutput{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("CopyPartResult", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentCopyPartResult(&sv.CopyPartResult, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+type awsRestxml_deserializeOpWriteGetObjectResponse struct {
+}
+
+func (*awsRestxml_deserializeOpWriteGetObjectResponse) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpWriteGetObjectResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsRestxml_deserializeOpErrorWriteGetObjectResponse(response, &metadata)
+	}
+	output := &WriteGetObjectResponseOutput{}
+	out.Result = output
+
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorWriteGetObjectResponse(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+		UseStatusCode: true, StatusCode: response.StatusCode,
+	})
+	if err != nil {
+		return err
+	}
+	if hostID := errorComponents.HostID; len(hostID) != 0 {
+		s3shared.SetHostIDMetadata(metadata, hostID)
+	}
+	if reqID := errorComponents.RequestID; len(reqID) != 0 {
+		awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+	}
+	if len(errorComponents.Code) != 0 {
+		errorCode = errorComponents.Code
+	}
+	if len(errorComponents.Message) != 0 {
+		errorMessage = errorComponents.Message
+	}
+	errorBody.Seek(0, io.SeekStart)
+	switch {
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+func awsRestxml_deserializeEventStreamSelectObjectContentEventStream(v *types.SelectObjectContentEventStream, msg *eventstream.Message) error {
+	if v == nil {
+		return fmt.Errorf("unexpected serialization of nil %T", v)
+	}
+
+	eventType := msg.Headers.Get(eventstreamapi.EventTypeHeader)
+	if eventType == nil {
+		return fmt.Errorf("%s event header not present", eventstreamapi.EventTypeHeader)
+	}
+
+	switch {
+	case strings.EqualFold("Cont", eventType.String()):
+		vv := &types.SelectObjectContentEventStreamMemberCont{}
+		if err := awsRestxml_deserializeEventMessageContinuationEvent(&vv.Value, msg); err != nil {
+			return err
+		}
+		*v = vv
+		return nil
+
+	case strings.EqualFold("End", eventType.String()):
+		vv := &types.SelectObjectContentEventStreamMemberEnd{}
+		if err := awsRestxml_deserializeEventMessageEndEvent(&vv.Value, msg); err != nil {
+			return err
+		}
+		*v = vv
+		return nil
+
+	case strings.EqualFold("Progress", eventType.String()):
+		vv := &types.SelectObjectContentEventStreamMemberProgress{}
+		if err := awsRestxml_deserializeEventMessageProgressEvent(&vv.Value, msg); err != nil {
+			return err
+		}
+		*v = vv
+		return nil
+
+	case strings.EqualFold("Records", eventType.String()):
+		vv := &types.SelectObjectContentEventStreamMemberRecords{}
+		if err := awsRestxml_deserializeEventMessageRecordsEvent(&vv.Value, msg); err != nil {
+			return err
+		}
+		*v = vv
+		return nil
+
+	case strings.EqualFold("Stats", eventType.String()):
+		vv := &types.SelectObjectContentEventStreamMemberStats{}
+		if err := awsRestxml_deserializeEventMessageStatsEvent(&vv.Value, msg); err != nil {
+			return err
+		}
+		*v = vv
+		return nil
+
+	default:
+		buffer := bytes.NewBuffer(nil)
+		eventstream.NewEncoder().Encode(buffer, *msg)
+		*v = &types.UnknownUnionMember{
+			Tag:   eventType.String(),
+			Value: buffer.Bytes(),
+		}
+		return nil
+
+	}
+}
+
+func awsRestxml_deserializeEventStreamExceptionSelectObjectContentEventStream(msg *eventstream.Message) error {
+	exceptionType := msg.Headers.Get(eventstreamapi.ExceptionTypeHeader)
+	if exceptionType == nil {
+		return fmt.Errorf("%s event header not present", eventstreamapi.ExceptionTypeHeader)
+	}
+
+	switch {
+	default:
+		br := bytes.NewReader(msg.Payload)
+		var buff [1024]byte
+		ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+		body := io.TeeReader(br, ringBuffer)
+		decoder := json.NewDecoder(body)
+		decoder.UseNumber()
+		errorComponents, err := awsxml.GetErrorResponseComponents(br, true)
+		if err != nil {
+			return err
+		}
+		errorCode := "UnknownError"
+		errorMessage := errorCode
+		if ev := exceptionType.String(); len(ev) > 0 {
+			errorCode = ev
+		} else if ev := errorComponents.Code; len(ev) > 0 {
+			errorCode = ev
+		}
+		if ev := errorComponents.Message; len(ev) > 0 {
+			errorMessage = ev
+		}
+		return &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+
+	}
+}
+
+func awsRestxml_deserializeEventMessageRecordsEvent(v *types.RecordsEvent, msg *eventstream.Message) error {
+	if v == nil {
+		return fmt.Errorf("unexpected serialization of nil %T", v)
+	}
+
+	if msg.Payload != nil {
+		bsv := make([]byte, len(msg.Payload))
+		copy(bsv, msg.Payload)
+
+		v.Payload = bsv
+	}
+	return nil
+}
+
+func awsRestxml_deserializeEventMessageStatsEvent(v *types.StatsEvent, msg *eventstream.Message) error {
+	if v == nil {
+		return fmt.Errorf("unexpected serialization of nil %T", v)
+	}
+
+	br := bytes.NewReader(msg.Payload)
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(br, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentStats(&v.Details, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return nil
+}
+
+func awsRestxml_deserializeEventMessageProgressEvent(v *types.ProgressEvent, msg *eventstream.Message) error {
+	if v == nil {
+		return fmt.Errorf("unexpected serialization of nil %T", v)
+	}
+
+	br := bytes.NewReader(msg.Payload)
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(br, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentProgress(&v.Details, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return nil
+}
+
+func awsRestxml_deserializeEventMessageContinuationEvent(v *types.ContinuationEvent, msg *eventstream.Message) error {
+	if v == nil {
+		return fmt.Errorf("unexpected serialization of nil %T", v)
+	}
+
+	br := bytes.NewReader(msg.Payload)
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(br, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentContinuationEvent(&v, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return nil
+}
+
+func awsRestxml_deserializeEventMessageEndEvent(v *types.EndEvent, msg *eventstream.Message) error {
+	if v == nil {
+		return fmt.Errorf("unexpected serialization of nil %T", v)
+	}
+
+	br := bytes.NewReader(msg.Payload)
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(br, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return nil
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentEndEvent(&v, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return nil
+}
+
+func awsRestxml_deserializeDocumentContinuationEvent(v **types.ContinuationEvent, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.ContinuationEvent
+	if *v == nil {
+		sv = &types.ContinuationEvent{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentEndEvent(v **types.EndEvent, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.EndEvent
+	if *v == nil {
+		sv = &types.EndEvent{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentProgress(v **types.Progress, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.Progress
+	if *v == nil {
+		sv = &types.Progress{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("BytesProcessed", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.BytesProcessed = ptr.Int64(i64)
+			}
+
+		case strings.EqualFold("BytesReturned", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.BytesReturned = ptr.Int64(i64)
+			}
+
+		case strings.EqualFold("BytesScanned", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.BytesScanned = ptr.Int64(i64)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentStats(v **types.Stats, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.Stats
+	if *v == nil {
+		sv = &types.Stats{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("BytesProcessed", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.BytesProcessed = ptr.Int64(i64)
+			}
+
+		case strings.EqualFold("BytesReturned", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.BytesReturned = ptr.Int64(i64)
+			}
+
+		case strings.EqualFold("BytesScanned", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.BytesScanned = ptr.Int64(i64)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeErrorBucketAlreadyExists(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.BucketAlreadyExists{}
+	return output
+}
+
+func awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.BucketAlreadyOwnedByYou{}
+	return output
+}
+
+func awsRestxml_deserializeErrorInvalidObjectState(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.InvalidObjectState{}
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(errorBody, ringBuffer)
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return output
+	}
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+	err = awsRestxml_deserializeDocumentInvalidObjectState(&output, decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		return &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+	}
+
+	return output
+}
+
+func awsRestxml_deserializeErrorNoSuchBucket(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.NoSuchBucket{}
+	return output
+}
+
+func awsRestxml_deserializeErrorNoSuchKey(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.NoSuchKey{}
+	return output
+}
+
+func awsRestxml_deserializeErrorNoSuchUpload(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.NoSuchUpload{}
+	return output
+}
+
+func awsRestxml_deserializeErrorNotFound(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.NotFound{}
+	return output
+}
+
+func awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.ObjectAlreadyInActiveTierError{}
+	return output
+}
+
+func awsRestxml_deserializeErrorObjectNotInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	output := &types.ObjectNotInActiveTierError{}
+	return output
+}
+
+func awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(v **types.AbortIncompleteMultipartUpload, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.AbortIncompleteMultipartUpload
+	if *v == nil {
+		sv = &types.AbortIncompleteMultipartUpload{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("DaysAfterInitiation", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.DaysAfterInitiation = ptr.Int32(int32(i64))
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentAccessControlTranslation(v **types.AccessControlTranslation, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.AccessControlTranslation
+	if *v == nil {
+		sv = &types.AccessControlTranslation{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Owner", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Owner = types.OwnerOverride(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentAllowedHeaders(v *[]string, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []string
+	if *v == nil {
+		sv = make([]string, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		decoder = memberDecoder
+		switch {
+		case strings.EqualFold("member", t.Name.Local):
+			var col string
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				col = xtv
+			}
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error {
+	var sv []string
+	if *v == nil {
+		sv = make([]string, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv string
+		t := decoder.StartEl
+		_ = t
+		val, err := decoder.Value()
+		if err != nil {
+			return err
+		}
+		if val == nil {
+			break
+		}
+		{
+			xtv := string(val)
+			mv = xtv
+		}
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
+func awsRestxml_deserializeDocumentAllowedMethods(v *[]string, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []string
+	if *v == nil {
+		sv = make([]string, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		decoder = memberDecoder
+		switch {
+		case strings.EqualFold("member", t.Name.Local):
+			var col string
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				col = xtv
+			}
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error {
+	var sv []string
+	if *v == nil {
+		sv = make([]string, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv string
+		t := decoder.StartEl
+		_ = t
+		val, err := decoder.Value()
+		if err != nil {
+			return err
+		}
+		if val == nil {
+			break
+		}
+		{
+			xtv := string(val)
+			mv = xtv
+		}
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
+func awsRestxml_deserializeDocumentAllowedOrigins(v *[]string, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []string
+	if *v == nil {
+		sv = make([]string, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		decoder = memberDecoder
+		switch {
+		case strings.EqualFold("member", t.Name.Local):
+			var col string
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				col = xtv
+			}
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error {
+	var sv []string
+	if *v == nil {
+		sv = make([]string, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv string
+		t := decoder.StartEl
+		_ = t
+		val, err := decoder.Value()
+		if err != nil {
+			return err
+		}
+		if val == nil {
+			break
+		}
+		{
+			xtv := string(val)
+			mv = xtv
+		}
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
+func awsRestxml_deserializeDocumentAnalyticsAndOperator(v **types.AnalyticsAndOperator, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.AnalyticsAndOperator
+	if *v == nil {
+		sv = &types.AnalyticsAndOperator{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Prefix", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Prefix = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Tag", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsConfiguration(v **types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.AnalyticsConfiguration
+	if *v == nil {
+		sv = &types.AnalyticsConfiguration{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Filter", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentAnalyticsFilter(&sv.Filter, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Id", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Id = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("StorageClassAnalysis", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentStorageClassAnalysis(&sv.StorageClassAnalysis, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsConfigurationList(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []types.AnalyticsConfiguration
+	if *v == nil {
+		sv = make([]types.AnalyticsConfiguration, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		switch {
+		case strings.EqualFold("member", t.Name.Local):
+			var col types.AnalyticsConfiguration
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			destAddr := &col
+			if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			col = *destAddr
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error {
+	var sv []types.AnalyticsConfiguration
+	if *v == nil {
+		sv = make([]types.AnalyticsConfiguration, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv types.AnalyticsConfiguration
+		t := decoder.StartEl
+		_ = t
+		nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		destAddr := &mv
+		if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil {
+			return err
+		}
+		mv = *destAddr
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
+func awsRestxml_deserializeDocumentAnalyticsExportDestination(v **types.AnalyticsExportDestination, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.AnalyticsExportDestination
+	if *v == nil {
+		sv = &types.AnalyticsExportDestination{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("S3BucketDestination", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(&sv.S3BucketDestination, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsFilter(v *types.AnalyticsFilter, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var uv types.AnalyticsFilter
+	var memberFound bool
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		if memberFound {
+			if err = decoder.Decoder.Skip(); err != nil {
+				return err
+			}
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("And", t.Name.Local):
+			var mv types.AnalyticsAndOperator
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			destAddr := &mv
+			if err := awsRestxml_deserializeDocumentAnalyticsAndOperator(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			mv = *destAddr
+			uv = &types.AnalyticsFilterMemberAnd{Value: mv}
+			memberFound = true
+
+		case strings.EqualFold("Prefix", t.Name.Local):
+			var mv string
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				mv = xtv
+			}
+			uv = &types.AnalyticsFilterMemberPrefix{Value: mv}
+			memberFound = true
+
+		case strings.EqualFold("Tag", t.Name.Local):
+			var mv types.Tag
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			destAddr := &mv
+			if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			mv = *destAddr
+			uv = &types.AnalyticsFilterMemberTag{Value: mv}
+			memberFound = true
+
+		default:
+			uv = &types.UnknownUnionMember{Tag: t.Name.Local}
+			memberFound = true
+
+		}
+		decoder = originalDecoder
+	}
+	*v = uv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(v **types.AnalyticsS3BucketDestination, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.AnalyticsS3BucketDestination
+	if *v == nil {
+		sv = &types.AnalyticsS3BucketDestination{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Bucket", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Bucket = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("BucketAccountId", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.BucketAccountId = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Format", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Format = types.AnalyticsS3ExportFileFormat(xtv)
+			}
+
+		case strings.EqualFold("Prefix", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Prefix = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentBucket(v **types.Bucket, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.Bucket
+	if *v == nil {
+		sv = &types.Bucket{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("CreationDate", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				t, err := smithytime.ParseDateTime(xtv)
+				if err != nil {
+					return err
+				}
+				sv.CreationDate = ptr.Time(t)
+			}
+
+		case strings.EqualFold("Name", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Name = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentBucketAlreadyExists(v **types.BucketAlreadyExists, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.BucketAlreadyExists
+	if *v == nil {
+		sv = &types.BucketAlreadyExists{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentBucketAlreadyOwnedByYou(v **types.BucketAlreadyOwnedByYou, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.BucketAlreadyOwnedByYou
+	if *v == nil {
+		sv = &types.BucketAlreadyOwnedByYou{}
&types.BucketAlreadyOwnedByYou{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentBuckets(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Bucket + if *v == nil { + sv = make([]types.Bucket, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("Bucket", t.Name.Local): + var col types.Bucket + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentBucketsUnwrapped(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error { + var sv []types.Bucket + if *v == nil { + sv = make([]types.Bucket, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Bucket + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentChecksum(v **types.Checksum, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Checksum + if *v == nil { + sv = &types.Checksum{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentChecksumAlgorithmList(v *[]types.ChecksumAlgorithm, decoder smithyxml.NodeDecoder) error { + if v == 
nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ChecksumAlgorithm + if *v == nil { + sv = make([]types.ChecksumAlgorithm, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.ChecksumAlgorithm + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = types.ChecksumAlgorithm(xtv) + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(v *[]types.ChecksumAlgorithm, decoder smithyxml.NodeDecoder) error { + var sv []types.ChecksumAlgorithm + if *v == nil { + sv = make([]types.ChecksumAlgorithm, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.ChecksumAlgorithm + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = types.ChecksumAlgorithm(xtv) + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentCommonPrefix(v **types.CommonPrefix, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CommonPrefix + if *v == nil { + sv = &types.CommonPrefix{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCommonPrefixList(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.CommonPrefix + if *v == nil { + sv = make([]types.CommonPrefix, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.CommonPrefix + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error { + var sv []types.CommonPrefix + if *v == nil { + sv = make([]types.CommonPrefix, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.CommonPrefix + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, 
t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentCondition(v **types.Condition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Condition + if *v == nil { + sv = &types.Condition{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("HttpErrorCodeReturnedEquals", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.HttpErrorCodeReturnedEquals = ptr.String(xtv) + } + + case strings.EqualFold("KeyPrefixEquals", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KeyPrefixEquals = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCopyObjectResult(v **types.CopyObjectResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CopyObjectResult + if *v == nil { + sv = &types.CopyObjectResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.LastModified = ptr.Time(t) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCopyPartResult(v **types.CopyPartResult, decoder smithyxml.NodeDecoder) error { + if v 
== nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CopyPartResult + if *v == nil { + sv = &types.CopyPartResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.LastModified = ptr.Time(t) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCORSRule(v **types.CORSRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CORSRule + if *v == nil { + sv = &types.CORSRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AllowedHeader", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(&sv.AllowedHeaders, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("AllowedMethod", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(&sv.AllowedMethods, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("AllowedOrigin", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(&sv.AllowedOrigins, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ExposeHeader", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentExposeHeadersUnwrapped(&sv.ExposeHeaders, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == 
nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + case strings.EqualFold("MaxAgeSeconds", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxAgeSeconds = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCORSRules(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.CORSRule + if *v == nil { + sv = make([]types.CORSRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.CORSRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCORSRulesUnwrapped(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error { + var sv []types.CORSRule + if *v == nil { + sv = make([]types.CORSRule, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.CORSRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentDefaultRetention(v **types.DefaultRetention, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DefaultRetention + if *v == nil { + sv = &types.DefaultRetention{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Days", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Days = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("Mode", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Mode = types.ObjectLockRetentionMode(xtv) + } + + case strings.EqualFold("Years", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Years = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeletedObject(v 
**types.DeletedObject, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DeletedObject + if *v == nil { + sv = &types.DeletedObject{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DeleteMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected DeleteMarker to be of type *bool, got %T instead", val) + } + sv.DeleteMarker = ptr.Bool(xtv) + } + + case strings.EqualFold("DeleteMarkerVersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DeleteMarkerVersionId = ptr.String(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("VersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VersionId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeletedObjects(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.DeletedObject + if *v == nil { + sv = make([]types.DeletedObject, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.DeletedObject + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error { + var sv []types.DeletedObject + if *v == nil { + sv = make([]types.DeletedObject, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.DeletedObject + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentDeleteMarkerEntry(v **types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DeleteMarkerEntry + if *v == nil { + sv = &types.DeleteMarkerEntry{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := 
decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("IsLatest", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val) + } + sv.IsLatest = ptr.Bool(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.LastModified = ptr.Time(t) + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("VersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VersionId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeleteMarkerReplication(v **types.DeleteMarkerReplication, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DeleteMarkerReplication + if *v == nil { + sv = &types.DeleteMarkerReplication{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.DeleteMarkerReplicationStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeleteMarkers(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.DeleteMarkerEntry + if *v == nil { + sv = make([]types.DeleteMarkerEntry, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.DeleteMarkerEntry + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { + 
var sv []types.DeleteMarkerEntry + if *v == nil { + sv = make([]types.DeleteMarkerEntry, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.DeleteMarkerEntry + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentDestination(v **types.Destination, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Destination + if *v == nil { + sv = &types.Destination{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessControlTranslation", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAccessControlTranslation(&sv.AccessControlTranslation, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("EncryptionConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEncryptionConfiguration(&sv.EncryptionConfiguration, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Metrics", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMetrics(&sv.Metrics, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ReplicationTime", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicationTime(&sv.ReplicationTime, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentEncryptionConfiguration(v **types.EncryptionConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.EncryptionConfiguration + if *v == nil { + sv = &types.EncryptionConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ReplicaKmsKeyID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ReplicaKmsKeyID = 
ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentError(v **types.Error, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Error + if *v == nil { + sv = &types.Error{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Code", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Code = ptr.String(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("Message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + case strings.EqualFold("VersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VersionId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentErrorDocument(v **types.ErrorDocument, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ErrorDocument + if *v == nil { + sv = &types.ErrorDocument{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentErrors(v *[]types.Error, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Error + if *v == nil { + sv = make([]types.Error, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Error + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentError(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentErrorsUnwrapped(v *[]types.Error, decoder smithyxml.NodeDecoder) 
error { + var sv []types.Error + if *v == nil { + sv = make([]types.Error, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Error + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentError(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentEventBridgeConfiguration(v **types.EventBridgeConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.EventBridgeConfiguration + if *v == nil { + sv = &types.EventBridgeConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentEventList(v *[]types.Event, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Event + if *v == nil { + sv = make([]types.Event, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Event + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = types.Event(xtv) + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentEventListUnwrapped(v *[]types.Event, decoder smithyxml.NodeDecoder) error { + var sv []types.Event + if *v == nil { + sv = make([]types.Event, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Event + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = types.Event(xtv) + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentExistingObjectReplication(v **types.ExistingObjectReplication, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ExistingObjectReplication + if *v == nil { + sv = &types.ExistingObjectReplication{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.ExistingObjectReplicationStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func 
awsRestxml_deserializeDocumentExposeHeaders(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentExposeHeadersUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentFilterRule(v **types.FilterRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.FilterRule + if *v == nil { + sv = &types.FilterRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Name", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Name = types.FilterRuleName(xtv) + } + + case strings.EqualFold("Value", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Value = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentFilterRuleList(v *[]types.FilterRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.FilterRule + if *v == nil { + sv = make([]types.FilterRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.FilterRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentFilterRule(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentFilterRuleListUnwrapped(v *[]types.FilterRule, decoder smithyxml.NodeDecoder) error { + var sv []types.FilterRule + if *v == nil { + sv = make([]types.FilterRule, 0) + } else { + sv = *v + } + + switch { + default: + var 
mv types.FilterRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentFilterRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectAttributesParts, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.GetObjectAttributesParts + if *v == nil { + sv = &types.GetObjectAttributesParts{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = ptr.Bool(xtv) + } + + case strings.EqualFold("MaxParts", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxParts = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("NextPartNumberMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextPartNumberMarker = ptr.String(xtv) + } + + case strings.EqualFold("PartNumberMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PartNumberMarker = ptr.String(xtv) + } + + case strings.EqualFold("Part", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentPartsListUnwrapped(&sv.Parts, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PartsCount", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.TotalPartsCount = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentGrant(v **types.Grant, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Grant + if *v == nil { + sv = &types.Grant{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Grantee", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGrantee(&sv.Grantee, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Permission", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + 
sv.Permission = types.Permission(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentGrantee(v **types.Grantee, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Grantee + if *v == nil { + sv = &types.Grantee{} + } else { + sv = *v + } + + for _, attr := range decoder.StartEl.Attr { + name := attr.Name.Local + if len(attr.Name.Space) != 0 { + name = attr.Name.Space + `:` + attr.Name.Local + } + switch { + case strings.EqualFold("xsi:type", name): + val := []byte(attr.Value) + { + xtv := string(val) + sv.Type = types.Type(xtv) + } + + } + } + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DisplayName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DisplayName = ptr.String(xtv) + } + + case strings.EqualFold("EmailAddress", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.EmailAddress = ptr.String(xtv) + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + case strings.EqualFold("URI", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.URI = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentGrants(v *[]types.Grant, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Grant + if *v == nil { + sv = make([]types.Grant, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("Grant", t.Name.Local): + var col types.Grant + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentGrant(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentGrantsUnwrapped(v *[]types.Grant, decoder smithyxml.NodeDecoder) error { + var sv []types.Grant + if *v == nil { + sv = make([]types.Grant, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Grant + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentGrant(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentIndexDocument(v **types.IndexDocument, decoder smithyxml.NodeDecoder) error { + if 
v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IndexDocument + if *v == nil { + sv = &types.IndexDocument{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Suffix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Suffix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInitiator(v **types.Initiator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Initiator + if *v == nil { + sv = &types.Initiator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DisplayName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DisplayName = ptr.String(xtv) + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentIntelligentTieringAndOperator(v **types.IntelligentTieringAndOperator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IntelligentTieringAndOperator + if *v == nil { + sv = &types.IntelligentTieringAndOperator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentIntelligentTieringConfiguration(v **types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IntelligentTieringConfiguration + if *v == nil { + sv = &types.IntelligentTieringConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + 
decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentIntelligentTieringFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.IntelligentTieringStatus(xtv) + } + + case strings.EqualFold("Tiering", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTieringListUnwrapped(&sv.Tierings, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentIntelligentTieringConfigurationList(v *[]types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.IntelligentTieringConfiguration + if *v == nil { + sv = make([]types.IntelligentTieringConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.IntelligentTieringConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentIntelligentTieringConfigurationListUnwrapped(v *[]types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.IntelligentTieringConfiguration + if *v == nil { + sv = make([]types.IntelligentTieringConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.IntelligentTieringConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentIntelligentTieringFilter(v **types.IntelligentTieringFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IntelligentTieringFilter + if *v == nil { + sv = &types.IntelligentTieringFilter{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("And", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := 
awsRestxml_deserializeDocumentIntelligentTieringAndOperator(&sv.And, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInvalidObjectState(v **types.InvalidObjectState, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidObjectState + if *v == nil { + sv = &types.InvalidObjectState{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessTier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessTier = types.IntelligentTieringAccessTier(xtv) + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryConfiguration(v **types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryConfiguration + if *v == nil { + sv = &types.InventoryConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Destination", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryDestination(&sv.Destination, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("IncludedObjectVersions", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.IncludedObjectVersions = types.InventoryIncludedObjectVersions(xtv) + } + + case strings.EqualFold("IsEnabled", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := 
strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsEnabled to be of type *bool, got %T instead", val) + } + sv.IsEnabled = ptr.Bool(xtv) + } + + case strings.EqualFold("OptionalFields", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryOptionalFields(&sv.OptionalFields, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Schedule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventorySchedule(&sv.Schedule, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryConfigurationList(v *[]types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.InventoryConfiguration + if *v == nil { + sv = make([]types.InventoryConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.InventoryConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentInventoryConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryConfigurationListUnwrapped(v *[]types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.InventoryConfiguration + if *v == nil { + sv = make([]types.InventoryConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.InventoryConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentInventoryConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentInventoryDestination(v **types.InventoryDestination, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryDestination + if *v == nil { + sv = &types.InventoryDestination{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("S3BucketDestination", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryS3BucketDestination(&sv.S3BucketDestination, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryEncryption(v **types.InventoryEncryption, decoder 
smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryEncryption + if *v == nil { + sv = &types.InventoryEncryption{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("SSE-KMS", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSSEKMS(&sv.SSEKMS, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("SSE-S3", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSSES3(&sv.SSES3, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryFilter(v **types.InventoryFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryFilter + if *v == nil { + sv = &types.InventoryFilter{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryOptionalFields(v *[]types.InventoryOptionalField, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.InventoryOptionalField + if *v == nil { + sv = make([]types.InventoryOptionalField, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("Field", t.Name.Local): + var col types.InventoryOptionalField + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = types.InventoryOptionalField(xtv) + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryOptionalFieldsUnwrapped(v *[]types.InventoryOptionalField, decoder smithyxml.NodeDecoder) error { + var sv []types.InventoryOptionalField + if *v == nil { + sv = make([]types.InventoryOptionalField, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.InventoryOptionalField + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = types.InventoryOptionalField(xtv) + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func 
awsRestxml_deserializeDocumentInventoryS3BucketDestination(v **types.InventoryS3BucketDestination, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryS3BucketDestination + if *v == nil { + sv = &types.InventoryS3BucketDestination{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccountId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccountId = ptr.String(xtv) + } + + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("Encryption", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryEncryption(&sv.Encryption, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Format", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Format = types.InventoryFormat(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventorySchedule(v **types.InventorySchedule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventorySchedule + if *v == nil { + sv = &types.InventorySchedule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Frequency", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Frequency = types.InventoryFrequency(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLambdaFunctionConfiguration(v **types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LambdaFunctionConfiguration + if *v == nil { + sv = &types.LambdaFunctionConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Event", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil { + return err + } + + 
case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("CloudFunction", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.LambdaFunctionArn = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLambdaFunctionConfigurationList(v *[]types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.LambdaFunctionConfiguration + if *v == nil { + sv = make([]types.LambdaFunctionConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.LambdaFunctionConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentLambdaFunctionConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(v *[]types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.LambdaFunctionConfiguration + if *v == nil { + sv = make([]types.LambdaFunctionConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.LambdaFunctionConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentLambdaFunctionConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentLifecycleExpiration(v **types.LifecycleExpiration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LifecycleExpiration + if *v == nil { + sv = &types.LifecycleExpiration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Date", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.Date = ptr.Time(t) + } + + case strings.EqualFold("Days", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return 
err + } + sv.Days = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("ExpiredObjectDeleteMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected ExpiredObjectDeleteMarker to be of type *bool, got %T instead", val) + } + sv.ExpiredObjectDeleteMarker = ptr.Bool(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRule(v **types.LifecycleRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LifecycleRule + if *v == nil { + sv = &types.LifecycleRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AbortIncompleteMultipartUpload", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(&sv.AbortIncompleteMultipartUpload, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Expiration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLifecycleExpiration(&sv.Expiration, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLifecycleRuleFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + case strings.EqualFold("NoncurrentVersionExpiration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentNoncurrentVersionExpiration(&sv.NoncurrentVersionExpiration, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("NoncurrentVersionTransition", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentNoncurrentVersionTransitionListUnwrapped(&sv.NoncurrentVersionTransitions, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.ExpirationStatus(xtv) + } + + case strings.EqualFold("Transition", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTransitionListUnwrapped(&sv.Transitions, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func 
awsRestxml_deserializeDocumentLifecycleRuleAndOperator(v **types.LifecycleRuleAndOperator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LifecycleRuleAndOperator + if *v == nil { + sv = &types.LifecycleRuleAndOperator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ObjectSizeGreaterThan", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ObjectSizeGreaterThan = ptr.Int64(i64) + } + + case strings.EqualFold("ObjectSizeLessThan", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ObjectSizeLessThan = ptr.Int64(i64) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var uv types.LifecycleRuleFilter + var memberFound bool + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + if memberFound { + if err = decoder.Decoder.Skip(); err != nil { + return err + } + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("And", t.Name.Local): + var mv types.LifecycleRuleAndOperator + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentLifecycleRuleAndOperator(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.LifecycleRuleFilterMemberAnd{Value: mv} + memberFound = true + + case strings.EqualFold("ObjectSizeGreaterThan", t.Name.Local): + var mv int64 + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + mv = i64 + } + uv = &types.LifecycleRuleFilterMemberObjectSizeGreaterThan{Value: mv} + memberFound = true + + case strings.EqualFold("ObjectSizeLessThan", t.Name.Local): + var mv int64 + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + mv = i64 + } + uv = &types.LifecycleRuleFilterMemberObjectSizeLessThan{Value: mv} + memberFound = true + + case strings.EqualFold("Prefix", t.Name.Local): + var mv 
string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + uv = &types.LifecycleRuleFilterMemberPrefix{Value: mv} + memberFound = true + + case strings.EqualFold("Tag", t.Name.Local): + var mv types.Tag + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.LifecycleRuleFilterMemberTag{Value: mv} + memberFound = true + + default: + uv = &types.UnknownUnionMember{Tag: t.Name.Local} + memberFound = true + + } + decoder = originalDecoder + } + *v = uv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRules(v *[]types.LifecycleRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.LifecycleRule + if *v == nil { + sv = make([]types.LifecycleRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.LifecycleRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentLifecycleRule(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRulesUnwrapped(v *[]types.LifecycleRule, decoder smithyxml.NodeDecoder) error { + var sv []types.LifecycleRule + if *v == nil { + sv = make([]types.LifecycleRule, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.LifecycleRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentLifecycleRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentLoggingEnabled(v **types.LoggingEnabled, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LoggingEnabled + if *v == nil { + sv = &types.LoggingEnabled{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("TargetBucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TargetBucket = ptr.String(xtv) + } + + case strings.EqualFold("TargetGrants", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTargetGrants(&sv.TargetGrants, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("TargetObjectKeyFormat", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTargetObjectKeyFormat(&sv.TargetObjectKeyFormat, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("TargetPrefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil 
{ + break + } + { + xtv := string(val) + sv.TargetPrefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMetrics(v **types.Metrics, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Metrics + if *v == nil { + sv = &types.Metrics{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("EventThreshold", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicationTimeValue(&sv.EventThreshold, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.MetricsStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMetricsAndOperator(v **types.MetricsAndOperator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MetricsAndOperator + if *v == nil { + sv = &types.MetricsAndOperator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessPointArn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessPointArn = ptr.String(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMetricsConfiguration(v **types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MetricsConfiguration + if *v == nil { + sv = &types.MetricsConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMetricsFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case 
strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMetricsConfigurationList(v *[]types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.MetricsConfiguration + if *v == nil { + sv = make([]types.MetricsConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.MetricsConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentMetricsConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMetricsConfigurationListUnwrapped(v *[]types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.MetricsConfiguration + if *v == nil { + sv = make([]types.MetricsConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.MetricsConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentMetricsConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentMetricsFilter(v *types.MetricsFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var uv types.MetricsFilter + var memberFound bool + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + if memberFound { + if err = decoder.Decoder.Skip(); err != nil { + return err + } + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessPointArn", t.Name.Local): + var mv string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + uv = &types.MetricsFilterMemberAccessPointArn{Value: mv} + memberFound = true + + case strings.EqualFold("And", t.Name.Local): + var mv types.MetricsAndOperator + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentMetricsAndOperator(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.MetricsFilterMemberAnd{Value: mv} + memberFound = true + + case strings.EqualFold("Prefix", t.Name.Local): + var mv string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + uv = &types.MetricsFilterMemberPrefix{Value: mv} + memberFound = true + + case strings.EqualFold("Tag", t.Name.Local): + var mv types.Tag + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + 
destAddr := &mv + if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.MetricsFilterMemberTag{Value: mv} + memberFound = true + + default: + uv = &types.UnknownUnionMember{Tag: t.Name.Local} + memberFound = true + + } + decoder = originalDecoder + } + *v = uv + return nil +} + +func awsRestxml_deserializeDocumentMultipartUpload(v **types.MultipartUpload, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MultipartUpload + if *v == nil { + sv = &types.MultipartUpload{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) + } + + case strings.EqualFold("Initiated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.Initiated = ptr.Time(t) + } + + case strings.EqualFold("Initiator", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + case strings.EqualFold("UploadId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UploadId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMultipartUploadList(v *[]types.MultipartUpload, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.MultipartUpload + if *v == nil { + sv = make([]types.MultipartUpload, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.MultipartUpload + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentMultipartUpload(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + 
*v = sv + return nil +} + +func awsRestxml_deserializeDocumentMultipartUploadListUnwrapped(v *[]types.MultipartUpload, decoder smithyxml.NodeDecoder) error { + var sv []types.MultipartUpload + if *v == nil { + sv = make([]types.MultipartUpload, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.MultipartUpload + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentMultipartUpload(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentNoncurrentVersionExpiration(v **types.NoncurrentVersionExpiration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NoncurrentVersionExpiration + if *v == nil { + sv = &types.NoncurrentVersionExpiration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("NewerNoncurrentVersions", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.NewerNoncurrentVersions = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("NoncurrentDays", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.NoncurrentDays = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNoncurrentVersionTransition(v **types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NoncurrentVersionTransition + if *v == nil { + sv = &types.NoncurrentVersionTransition{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("NewerNoncurrentVersions", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.NewerNoncurrentVersions = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("NoncurrentDays", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.NoncurrentDays = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.TransitionStorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + 
} + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNoncurrentVersionTransitionList(v *[]types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.NoncurrentVersionTransition + if *v == nil { + sv = make([]types.NoncurrentVersionTransition, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.NoncurrentVersionTransition + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentNoncurrentVersionTransition(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNoncurrentVersionTransitionListUnwrapped(v *[]types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error { + var sv []types.NoncurrentVersionTransition + if *v == nil { + sv = make([]types.NoncurrentVersionTransition, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.NoncurrentVersionTransition + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentNoncurrentVersionTransition(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentNoSuchBucket(v **types.NoSuchBucket, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NoSuchBucket + if *v == nil { + sv = &types.NoSuchBucket{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNoSuchKey(v **types.NoSuchKey, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NoSuchKey + if *v == nil { + sv = &types.NoSuchKey{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNoSuchUpload(v **types.NoSuchUpload, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NoSuchUpload + if *v == nil { + sv = &types.NoSuchUpload{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = 
smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNotFound(v **types.NotFound, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NotFound + if *v == nil { + sv = &types.NotFound{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNotificationConfigurationFilter(v **types.NotificationConfigurationFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NotificationConfigurationFilter + if *v == nil { + sv = &types.NotificationConfigurationFilter{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("S3Key", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentS3KeyFilter(&sv.Key, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObject(v **types.Object, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Object + if *v == nil { + sv = &types.Object{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(&sv.ChecksumAlgorithm, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.LastModified = ptr.Time(t) + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + 
return err + } + + case strings.EqualFold("RestoreStatus", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentRestoreStatus(&sv.RestoreStatus, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Size", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Size = ptr.Int64(i64) + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.ObjectStorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectAlreadyInActiveTierError(v **types.ObjectAlreadyInActiveTierError, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectAlreadyInActiveTierError + if *v == nil { + sv = &types.ObjectAlreadyInActiveTierError{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectList(v *[]types.Object, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Object + if *v == nil { + sv = make([]types.Object, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Object + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectListUnwrapped(v *[]types.Object, decoder smithyxml.NodeDecoder) error { + var sv []types.Object + if *v == nil { + sv = make([]types.Object, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Object + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentObjectLockConfiguration(v **types.ObjectLockConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectLockConfiguration + if *v == nil { + sv = &types.ObjectLockConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + 
break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ObjectLockEnabled", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ObjectLockEnabled = types.ObjectLockEnabled(xtv) + } + + case strings.EqualFold("Rule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectLockRule(&sv.Rule, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectLockLegalHold(v **types.ObjectLockLegalHold, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectLockLegalHold + if *v == nil { + sv = &types.ObjectLockLegalHold{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.ObjectLockLegalHoldStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectLockRetention(v **types.ObjectLockRetention, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectLockRetention + if *v == nil { + sv = &types.ObjectLockRetention{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Mode", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Mode = types.ObjectLockRetentionMode(xtv) + } + + case strings.EqualFold("RetainUntilDate", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.RetainUntilDate = ptr.Time(t) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectLockRule(v **types.ObjectLockRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectLockRule + if *v == nil { + sv = &types.ObjectLockRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DefaultRetention", t.Name.Local): + nodeDecoder := 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentDefaultRetention(&sv.DefaultRetention, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectNotInActiveTierError(v **types.ObjectNotInActiveTierError, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectNotInActiveTierError + if *v == nil { + sv = &types.ObjectNotInActiveTierError{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectPart(v **types.ObjectPart, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectPart + if *v == nil { + sv = &types.ObjectPart{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + case strings.EqualFold("PartNumber", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PartNumber = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("Size", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Size = ptr.Int64(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectVersion + if *v == nil { + sv = &types.ObjectVersion{} + } else { + sv = *v + } + + for 
{ + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(&sv.ChecksumAlgorithm, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("IsLatest", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val) + } + sv.IsLatest = ptr.Bool(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.LastModified = ptr.Time(t) + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("RestoreStatus", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentRestoreStatus(&sv.RestoreStatus, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Size", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Size = ptr.Int64(i64) + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.ObjectVersionStorageClass(xtv) + } + + case strings.EqualFold("VersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VersionId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectVersionList(v *[]types.ObjectVersion, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ObjectVersion + if *v == nil { + sv = make([]types.ObjectVersion, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.ObjectVersion + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := 
awsRestxml_deserializeDocumentObjectVersion(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			col = *destAddr
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentObjectVersionListUnwrapped(v *[]types.ObjectVersion, decoder smithyxml.NodeDecoder) error {
+	var sv []types.ObjectVersion
+	if *v == nil {
+		sv = make([]types.ObjectVersion, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv types.ObjectVersion
+		t := decoder.StartEl
+		_ = t
+		nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		destAddr := &mv
+		if err := awsRestxml_deserializeDocumentObjectVersion(&destAddr, nodeDecoder); err != nil {
+			return err
+		}
+		mv = *destAddr
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
+func awsRestxml_deserializeDocumentOwner(v **types.Owner, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.Owner
+	if *v == nil {
+		sv = &types.Owner{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("DisplayName", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.DisplayName = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("ID", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ID = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentOwnershipControls(v **types.OwnershipControls, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.OwnershipControls
+	if *v == nil {
+		sv = &types.OwnershipControls{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Rule", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentOwnershipControlsRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentOwnershipControlsRule(v **types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.OwnershipControlsRule
+	if *v == nil {
+		sv = &types.OwnershipControlsRule{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("ObjectOwnership", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ObjectOwnership = types.ObjectOwnership(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentOwnershipControlsRules(v *[]types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []types.OwnershipControlsRule
+	if *v == nil {
+		sv = make([]types.OwnershipControlsRule, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		switch {
+		case strings.EqualFold("member", t.Name.Local):
+			var col types.OwnershipControlsRule
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			destAddr := &col
+			if err := awsRestxml_deserializeDocumentOwnershipControlsRule(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			col = *destAddr
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentOwnershipControlsRulesUnwrapped(v *[]types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error {
+	var sv []types.OwnershipControlsRule
+	if *v == nil {
+		sv = make([]types.OwnershipControlsRule, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv types.OwnershipControlsRule
+		t := decoder.StartEl
+		_ = t
+		nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		destAddr := &mv
+		if err := awsRestxml_deserializeDocumentOwnershipControlsRule(&destAddr, nodeDecoder); err != nil {
+			return err
+		}
+		mv = *destAddr
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
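The generated code pairs every list shape with an "Unwrapped" variant: the plain function (e.g. awsRestxml_deserializeDocumentOwnershipControlsRules) consumes a wrapped list of <member> elements, while the Unwrapped variant consumes S3's flattened encoding, where each repeated <Rule> element sits directly under its parent; that is why awsRestxml_deserializeDocumentOwnershipControls dispatches each <Rule> tag to the Unwrapped function, one element per call. A minimal standalone sketch of the flattened shape, using only encoding/xml with illustrative struct names (not the vendored code path):

package main

import (
	"encoding/xml"
	"fmt"
)

type ownershipControls struct {
	// flattened list: one entry per repeated <Rule> element, no wrapper
	Rules []struct {
		ObjectOwnership string
	} `xml:"Rule"`
}

func main() {
	const body = `<OwnershipControls>
  <Rule><ObjectOwnership>BucketOwnerEnforced</ObjectOwnership></Rule>
  <Rule><ObjectOwnership>ObjectWriter</ObjectOwnership></Rule>
</OwnershipControls>`
	var oc ownershipControls
	if err := xml.Unmarshal([]byte(body), &oc); err != nil {
		panic(err)
	}
	fmt.Println(len(oc.Rules), oc.Rules[0].ObjectOwnership) // 2 BucketOwnerEnforced
}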
+func awsRestxml_deserializeDocumentPart(v **types.Part, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.Part
+	if *v == nil {
+		sv = &types.Part{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("ChecksumCRC32", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ChecksumCRC32 = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("ChecksumCRC32C", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ChecksumCRC32C = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("ChecksumSHA1", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ChecksumSHA1 = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("ChecksumSHA256", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ChecksumSHA256 = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("ETag", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ETag = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("LastModified", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				t, err := smithytime.ParseDateTime(xtv)
+				if err != nil {
+					return err
+				}
+				sv.LastModified = ptr.Time(t)
+			}
+
+		case strings.EqualFold("PartNumber", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.PartNumber = ptr.Int32(int32(i64))
+			}
+
+		case strings.EqualFold("Size", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.Size = ptr.Int64(i64)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentPartitionedPrefix(v **types.PartitionedPrefix, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.PartitionedPrefix
+	if *v == nil {
+		sv = &types.PartitionedPrefix{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("PartitionDateSource", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.PartitionDateSource = types.PartitionDateSource(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentParts(v *[]types.Part, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []types.Part
+	if *v == nil {
+		sv = make([]types.Part, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		switch {
+		case strings.EqualFold("member", t.Name.Local):
+			var col types.Part
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			destAddr := &col
+			if err := awsRestxml_deserializeDocumentPart(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			col = *destAddr
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentPartsUnwrapped(v *[]types.Part, decoder smithyxml.NodeDecoder) error {
+	var sv []types.Part
+	if *v == nil {
+		sv = make([]types.Part, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv types.Part
+		t := decoder.StartEl
+		_ = t
+		nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		destAddr := &mv
+		if err := awsRestxml_deserializeDocumentPart(&destAddr, nodeDecoder); err != nil {
+			return err
+		}
+		mv = *destAddr
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
+func awsRestxml_deserializeDocumentPartsList(v *[]types.ObjectPart, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []types.ObjectPart
+	if *v == nil {
+		sv = make([]types.ObjectPart, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		switch {
+		case strings.EqualFold("member", t.Name.Local):
+			var col types.ObjectPart
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			destAddr := &col
+			if err := awsRestxml_deserializeDocumentObjectPart(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			col = *destAddr
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentPartsListUnwrapped(v *[]types.ObjectPart, decoder smithyxml.NodeDecoder) error {
+	var sv []types.ObjectPart
+	if *v == nil {
+		sv = make([]types.ObjectPart, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv types.ObjectPart
+		t := decoder.StartEl
+		_ = t
+		nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		destAddr := &mv
+		if err := awsRestxml_deserializeDocumentObjectPart(&destAddr, nodeDecoder); err != nil {
+			return err
+		}
+		mv = *destAddr
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
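types.Part (above) backs ListParts responses, while types.ObjectPart, decoded by the PartsList pair, surfaces through the public GetObjectAttributes operation. A hedged usage sketch of that entry point, assuming the standard v2 client setup; bucket and key names are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)
	out, err := client.GetObjectAttributes(context.TODO(), &s3.GetObjectAttributesInput{
		Bucket:           aws.String("example-bucket"), // placeholder
		Key:              aws.String("example-key"),    // placeholder
		ObjectAttributes: []types.ObjectAttributes{types.ObjectAttributesObjectParts},
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.ObjectParts != nil {
		// []types.ObjectPart, produced by the XML deserializer above
		for _, p := range out.ObjectParts.Parts {
			fmt.Println(aws.ToInt32(p.PartNumber), aws.ToInt64(p.Size))
		}
	}
}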
+func awsRestxml_deserializeDocumentPolicyStatus(v **types.PolicyStatus, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.PolicyStatus
+	if *v == nil {
+		sv = &types.PolicyStatus{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("IsPublic", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected IsPublic to be of type *bool, got %T instead", val)
+				}
+				sv.IsPublic = ptr.Bool(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
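PolicyStatus is the payload of GetBucketPolicyStatus; its single IsPublic flag is parsed with strconv.ParseBool above. A short, hedged consumption sketch (bucket name is a placeholder):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	out, err := s3.NewFromConfig(cfg).GetBucketPolicyStatus(context.TODO(), &s3.GetBucketPolicyStatusInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.PolicyStatus != nil {
		fmt.Println("public:", aws.ToBool(out.PolicyStatus.IsPublic))
	}
}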
+func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.PublicAccessBlockConfiguration, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.PublicAccessBlockConfiguration
+	if *v == nil {
+		sv = &types.PublicAccessBlockConfiguration{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("BlockPublicAcls", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val)
+				}
+				sv.BlockPublicAcls = ptr.Bool(xtv)
+			}
+
+		case strings.EqualFold("BlockPublicPolicy", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val)
+				}
+				sv.BlockPublicPolicy = ptr.Bool(xtv)
+			}
+
+		case strings.EqualFold("IgnorePublicAcls", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val)
+				}
+				sv.IgnorePublicAcls = ptr.Bool(xtv)
+			}
+
+		case strings.EqualFold("RestrictPublicBuckets", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val)
+				}
+				sv.RestrictPublicBuckets = ptr.Bool(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentQueueConfiguration(v **types.QueueConfiguration, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.QueueConfiguration
+	if *v == nil {
+		sv = &types.QueueConfiguration{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Event", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Filter", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Id", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Id = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Queue", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.QueueArn = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentQueueConfigurationList(v *[]types.QueueConfiguration, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []types.QueueConfiguration
+	if *v == nil {
+		sv = make([]types.QueueConfiguration, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		switch {
+		case strings.EqualFold("member", t.Name.Local):
+			var col types.QueueConfiguration
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			destAddr := &col
+			if err := awsRestxml_deserializeDocumentQueueConfiguration(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			col = *destAddr
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentQueueConfigurationListUnwrapped(v *[]types.QueueConfiguration, decoder smithyxml.NodeDecoder) error {
+	var sv []types.QueueConfiguration
+	if *v == nil {
+		sv = make([]types.QueueConfiguration, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv types.QueueConfiguration
+		t := decoder.StartEl
+		_ = t
+		nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		destAddr := &mv
+		if err := awsRestxml_deserializeDocumentQueueConfiguration(&destAddr, nodeDecoder); err != nil {
+			return err
+		}
+		mv = *destAddr
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
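Note the XML-name-to-field renames in QueueConfiguration: the <Queue> element lands in the QueueArn field, and each repeated <Event> element is folded into the Events slice via the unwrapped event-list decoder. These configurations come back from GetBucketNotificationConfiguration; a hedged sketch (bucket name is a placeholder):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	out, err := s3.NewFromConfig(cfg).GetBucketNotificationConfiguration(context.TODO(),
		&s3.GetBucketNotificationConfigurationInput{Bucket: aws.String("example-bucket")}) // placeholder
	if err != nil {
		log.Fatal(err)
	}
	// decoded by awsRestxml_deserializeDocumentQueueConfigurationList above
	for _, qc := range out.QueueConfigurations {
		fmt.Println(aws.ToString(qc.QueueArn), qc.Events) // <Queue> and repeated <Event> elements
	}
}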
+func awsRestxml_deserializeDocumentRedirect(v **types.Redirect, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.Redirect
+	if *v == nil {
+		sv = &types.Redirect{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("HostName", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.HostName = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("HttpRedirectCode", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.HttpRedirectCode = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Protocol", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Protocol = types.Protocol(xtv)
+			}
+
+		case strings.EqualFold("ReplaceKeyPrefixWith", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ReplaceKeyPrefixWith = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("ReplaceKeyWith", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ReplaceKeyWith = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentRedirectAllRequestsTo(v **types.RedirectAllRequestsTo, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.RedirectAllRequestsTo
+	if *v == nil {
+		sv = &types.RedirectAllRequestsTo{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("HostName", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.HostName = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Protocol", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Protocol = types.Protocol(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentReplicaModifications(v **types.ReplicaModifications, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.ReplicaModifications
+	if *v == nil {
+		sv = &types.ReplicaModifications{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Status", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Status = types.ReplicaModificationsStatus(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationConfiguration(v **types.ReplicationConfiguration, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.ReplicationConfiguration
+	if *v == nil {
+		sv = &types.ReplicationConfiguration{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Role", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Role = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Rule", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentReplicationRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationRule(v **types.ReplicationRule, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.ReplicationRule
+	if *v == nil {
+		sv = &types.ReplicationRule{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("DeleteMarkerReplication", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentDeleteMarkerReplication(&sv.DeleteMarkerReplication, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Destination", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentDestination(&sv.Destination, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("ExistingObjectReplication", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentExistingObjectReplication(&sv.ExistingObjectReplication, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Filter", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentReplicationRuleFilter(&sv.Filter, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("ID", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.ID = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Prefix", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Prefix = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Priority", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.Priority = ptr.Int32(int32(i64))
+			}
+
+		case strings.EqualFold("SourceSelectionCriteria", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentSourceSelectionCriteria(&sv.SourceSelectionCriteria, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Status", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Status = types.ReplicationRuleStatus(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationRuleAndOperator(v **types.ReplicationRuleAndOperator, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.ReplicationRuleAndOperator
+	if *v == nil {
+		sv = &types.ReplicationRuleAndOperator{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Prefix", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Prefix = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Tag", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationRuleFilter(v *types.ReplicationRuleFilter, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var uv types.ReplicationRuleFilter
+	var memberFound bool
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		if memberFound {
+			if err = decoder.Decoder.Skip(); err != nil {
+				return err
+			}
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("And", t.Name.Local):
+			var mv types.ReplicationRuleAndOperator
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			destAddr := &mv
+			if err := awsRestxml_deserializeDocumentReplicationRuleAndOperator(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			mv = *destAddr
+			uv = &types.ReplicationRuleFilterMemberAnd{Value: mv}
+			memberFound = true
+
+		case strings.EqualFold("Prefix", t.Name.Local):
+			var mv string
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				mv = xtv
+			}
+			uv = &types.ReplicationRuleFilterMemberPrefix{Value: mv}
+			memberFound = true
+
+		case strings.EqualFold("Tag", t.Name.Local):
+			var mv types.Tag
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			destAddr := &mv
+			if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			mv = *destAddr
+			uv = &types.ReplicationRuleFilterMemberTag{Value: mv}
+			memberFound = true
+
+		default:
+			uv = &types.UnknownUnionMember{Tag: t.Name.Local}
+			memberFound = true
+
+		}
+		decoder = originalDecoder
+	}
+	*v = uv
+	return nil
+}
+
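ReplicationRuleFilter is generated as a tagged union in this SDK version: exactly one member is populated, unrecognized element names become types.UnknownUnionMember, and the memberFound flag makes the decoder skip any element after the first. Callers unpack the union with a type switch over the generated member wrappers; a hedged sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// describeFilter unpacks the union members produced by the deserializer above.
func describeFilter(f types.ReplicationRuleFilter) string {
	switch v := f.(type) {
	case *types.ReplicationRuleFilterMemberPrefix:
		return "prefix: " + v.Value
	case *types.ReplicationRuleFilterMemberTag:
		return "tag: " + aws.ToString(v.Value.Key)
	case *types.ReplicationRuleFilterMemberAnd:
		return fmt.Sprintf("and: prefix=%q, %d tag(s)", aws.ToString(v.Value.Prefix), len(v.Value.Tags))
	case *types.UnknownUnionMember:
		return "unknown member: " + v.Tag
	default:
		return "no filter set"
	}
}

func main() {
	fmt.Println(describeFilter(&types.ReplicationRuleFilterMemberPrefix{Value: "logs/"}))
}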
+func awsRestxml_deserializeDocumentReplicationRules(v *[]types.ReplicationRule, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []types.ReplicationRule
+	if *v == nil {
+		sv = make([]types.ReplicationRule, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		switch {
+		case strings.EqualFold("member", t.Name.Local):
+			var col types.ReplicationRule
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			destAddr := &col
+			if err := awsRestxml_deserializeDocumentReplicationRule(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			col = *destAddr
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationRulesUnwrapped(v *[]types.ReplicationRule, decoder smithyxml.NodeDecoder) error {
+	var sv []types.ReplicationRule
+	if *v == nil {
+		sv = make([]types.ReplicationRule, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv types.ReplicationRule
+		t := decoder.StartEl
+		_ = t
+		nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		destAddr := &mv
+		if err := awsRestxml_deserializeDocumentReplicationRule(&destAddr, nodeDecoder); err != nil {
+			return err
+		}
+		mv = *destAddr
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
+func awsRestxml_deserializeDocumentReplicationTime(v **types.ReplicationTime, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.ReplicationTime
+	if *v == nil {
+		sv = &types.ReplicationTime{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Status", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Status = types.ReplicationTimeStatus(xtv)
+			}
+
+		case strings.EqualFold("Time", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentReplicationTimeValue(&sv.Time, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationTimeValue(v **types.ReplicationTimeValue, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.ReplicationTimeValue
+	if *v == nil {
+		sv = &types.ReplicationTimeValue{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Minutes", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				i64, err := strconv.ParseInt(xtv, 10, 64)
+				if err != nil {
+					return err
+				}
+				sv.Minutes = ptr.Int32(int32(i64))
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentRestoreStatus(v **types.RestoreStatus, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.RestoreStatus
+	if *v == nil {
+		sv = &types.RestoreStatus{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("IsRestoreInProgress", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected IsRestoreInProgress to be of type *bool, got %T instead", val)
+				}
+				sv.IsRestoreInProgress = ptr.Bool(xtv)
+			}
+
+		case strings.EqualFold("RestoreExpiryDate", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				t, err := smithytime.ParseDateTime(xtv)
+				if err != nil {
+					return err
+				}
+				sv.RestoreExpiryDate = ptr.Time(t)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentRoutingRule(v **types.RoutingRule, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.RoutingRule
+	if *v == nil {
+		sv = &types.RoutingRule{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Condition", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentCondition(&sv.Condition, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("Redirect", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentRedirect(&sv.Redirect, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentRoutingRules(v *[]types.RoutingRule, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []types.RoutingRule
+	if *v == nil {
+		sv = make([]types.RoutingRule, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		switch {
+		case strings.EqualFold("RoutingRule", t.Name.Local):
+			var col types.RoutingRule
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			destAddr := &col
+			if err := awsRestxml_deserializeDocumentRoutingRule(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			col = *destAddr
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentRoutingRulesUnwrapped(v *[]types.RoutingRule, decoder smithyxml.NodeDecoder) error {
+	var sv []types.RoutingRule
+	if *v == nil {
+		sv = make([]types.RoutingRule, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv types.RoutingRule
+		t := decoder.StartEl
+		_ = t
+		nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		destAddr := &mv
+		if err := awsRestxml_deserializeDocumentRoutingRule(&destAddr, nodeDecoder); err != nil {
+			return err
+		}
+		mv = *destAddr
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
+func awsRestxml_deserializeDocumentS3KeyFilter(v **types.S3KeyFilter, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.S3KeyFilter
+	if *v == nil {
+		sv = &types.S3KeyFilter{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("FilterRule", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentFilterRuleListUnwrapped(&sv.FilterRules, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionByDefault(v **types.ServerSideEncryptionByDefault, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.ServerSideEncryptionByDefault
+	if *v == nil {
+		sv = &types.ServerSideEncryptionByDefault{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("KMSMasterKeyID", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.KMSMasterKeyID = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("SSEAlgorithm", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.SSEAlgorithm = types.ServerSideEncryption(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(v **types.ServerSideEncryptionConfiguration, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.ServerSideEncryptionConfiguration
+	if *v == nil {
+		sv = &types.ServerSideEncryptionConfiguration{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Rule", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionRule(v **types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.ServerSideEncryptionRule
+	if *v == nil {
+		sv = &types.ServerSideEncryptionRule{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("ApplyServerSideEncryptionByDefault", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentServerSideEncryptionByDefault(&sv.ApplyServerSideEncryptionByDefault, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("BucketKeyEnabled", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv, err := strconv.ParseBool(string(val))
+				if err != nil {
+					return fmt.Errorf("expected BucketKeyEnabled to be of type *bool, got %T instead", val)
+				}
+				sv.BucketKeyEnabled = ptr.Bool(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionRules(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []types.ServerSideEncryptionRule
+	if *v == nil {
+		sv = make([]types.ServerSideEncryptionRule, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		switch {
+		case strings.EqualFold("member", t.Name.Local):
+			var col types.ServerSideEncryptionRule
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			destAddr := &col
+			if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			col = *destAddr
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error {
+	var sv []types.ServerSideEncryptionRule
+	if *v == nil {
+		sv = make([]types.ServerSideEncryptionRule, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv types.ServerSideEncryptionRule
+		t := decoder.StartEl
+		_ = t
+		nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		destAddr := &mv
+		if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil {
+			return err
+		}
+		mv = *destAddr
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
+func awsRestxml_deserializeDocumentSessionCredentials(v **types.SessionCredentials, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.SessionCredentials
+	if *v == nil {
+		sv = &types.SessionCredentials{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("AccessKeyId", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.AccessKeyId = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Expiration", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				t, err := smithytime.ParseDateTime(xtv)
+				if err != nil {
+					return err
+				}
+				sv.Expiration = ptr.Time(t)
+			}
+
+		case strings.EqualFold("SecretAccessKey", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.SecretAccessKey = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("SessionToken", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.SessionToken = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentSimplePrefix(v **types.SimplePrefix, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.SimplePrefix
+	if *v == nil {
+		sv = &types.SimplePrefix{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentSourceSelectionCriteria(v **types.SourceSelectionCriteria, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.SourceSelectionCriteria
+	if *v == nil {
+		sv = &types.SourceSelectionCriteria{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("ReplicaModifications", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentReplicaModifications(&sv.ReplicaModifications, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("SseKmsEncryptedObjects", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentSseKmsEncryptedObjects(&sv.SseKmsEncryptedObjects, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentSSEKMS(v **types.SSEKMS, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.SSEKMS
+	if *v == nil {
+		sv = &types.SSEKMS{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("KeyId", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.KeyId = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentSseKmsEncryptedObjects(v **types.SseKmsEncryptedObjects, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.SseKmsEncryptedObjects
+	if *v == nil {
+		sv = &types.SseKmsEncryptedObjects{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Status", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Status = types.SseKmsEncryptedObjectsStatus(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentSSES3(v **types.SSES3, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.SSES3
+	if *v == nil {
+		sv = &types.SSES3{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentStorageClassAnalysis(v **types.StorageClassAnalysis, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.StorageClassAnalysis
+	if *v == nil {
+		sv = &types.StorageClassAnalysis{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("DataExport", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(&sv.DataExport, nodeDecoder); err != nil {
+				return err
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(v **types.StorageClassAnalysisDataExport, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.StorageClassAnalysisDataExport
+	if *v == nil {
+		sv = &types.StorageClassAnalysisDataExport{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Destination", t.Name.Local):
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			if err := awsRestxml_deserializeDocumentAnalyticsExportDestination(&sv.Destination, nodeDecoder); err != nil {
+				return err
+			}
+
+		case strings.EqualFold("OutputSchemaVersion", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.OutputSchemaVersion = types.StorageClassAnalysisSchemaVersion(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentTag(v **types.Tag, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv *types.Tag
+	if *v == nil {
+		sv = &types.Tag{}
+	} else {
+		sv = *v
+	}
+
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		originalDecoder := decoder
+		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+		switch {
+		case strings.EqualFold("Key", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Key = ptr.String(xtv)
+			}
+
+		case strings.EqualFold("Value", t.Name.Local):
+			val, err := decoder.Value()
+			if err != nil {
+				return err
+			}
+			if val == nil {
+				break
+			}
+			{
+				xtv := string(val)
+				sv.Value = ptr.String(xtv)
+			}
+
+		default:
+			// Do nothing and ignore the unexpected tag element
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentTagSet(v *[]types.Tag, decoder smithyxml.NodeDecoder) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	var sv []types.Tag
+	if *v == nil {
+		sv = make([]types.Tag, 0)
+	} else {
+		sv = *v
+	}
+
+	originalDecoder := decoder
+	for {
+		t, done, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if done {
+			break
+		}
+		switch {
+		case strings.EqualFold("Tag", t.Name.Local):
+			var col types.Tag
+			nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+			destAddr := &col
+			if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+				return err
+			}
+			col = *destAddr
+			sv = append(sv, col)
+
+		default:
+			err = decoder.Decoder.Skip()
+			if err != nil {
+				return err
+			}
+
+		}
+		decoder = originalDecoder
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestxml_deserializeDocumentTagSetUnwrapped(v *[]types.Tag, decoder smithyxml.NodeDecoder) error {
+	var sv []types.Tag
+	if *v == nil {
+		sv = make([]types.Tag, 0)
+	} else {
+		sv = *v
+	}
+
+	switch {
+	default:
+		var mv types.Tag
+		t := decoder.StartEl
+		_ = t
+		nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+		destAddr := &mv
+		if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+			return err
+		}
+		mv = *destAddr
+		sv = append(sv, mv)
+	}
+	*v = sv
+	return nil
+}
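TagSet is a wrapped list whose member element is named <Tag> rather than the default <member>, which is why the case above matches "Tag"; the same TagSetUnwrapped helper also serves flattened contexts such as ReplicationRuleAndOperator. Tags are retrieved through GetBucketTagging; a hedged sketch (bucket name is a placeholder):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	out, err := s3.NewFromConfig(cfg).GetBucketTagging(context.TODO(),
		&s3.GetBucketTaggingInput{Bucket: aws.String("example-bucket")}) // placeholder
	if err != nil {
		log.Fatal(err)
	}
	for _, tag := range out.TagSet { // []types.Tag from awsRestxml_deserializeDocumentTagSet
		fmt.Println(aws.ToString(tag.Key), "=", aws.ToString(tag.Value))
	}
}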
t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentPartitionedPrefix(&sv.PartitionedPrefix, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("SimplePrefix", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSimplePrefix(&sv.SimplePrefix, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTiering(v **types.Tiering, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Tiering + if *v == nil { + sv = &types.Tiering{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessTier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessTier = types.IntelligentTieringAccessTier(xtv) + } + + case strings.EqualFold("Days", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Days = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTieringList(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Tiering + if *v == nil { + sv = make([]types.Tiering, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Tiering + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTieringListUnwrapped(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error { + var sv []types.Tiering + if *v == nil { + sv = make([]types.Tiering, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Tiering + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentTopicConfiguration(v **types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.TopicConfiguration + if *v == nil { + sv = &types.TopicConfiguration{} + } else { + sv = 
*v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Event", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("Topic", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TopicArn = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTopicConfigurationList(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.TopicConfiguration + if *v == nil { + sv = make([]types.TopicConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.TopicConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.TopicConfiguration + if *v == nil { + sv = make([]types.TopicConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.TopicConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentTransition(v **types.Transition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Transition + if *v == nil { + sv = &types.Transition{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Date", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { 
+ return err + } + sv.Date = ptr.Time(t) + } + + case strings.EqualFold("Days", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Days = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.TransitionStorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTransitionList(v *[]types.Transition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Transition + if *v == nil { + sv = make([]types.Transition, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Transition + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTransitionListUnwrapped(v *[]types.Transition, decoder smithyxml.NodeDecoder) error { + var sv []types.Transition + if *v == nil { + sv = make([]types.Transition, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Transition + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go new file mode 100644 index 000000000..d825a41a7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go @@ -0,0 +1,5 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package s3 provides the API client, operations, and parameter types for Amazon +// Simple Storage Service. 
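The generated REST-XML deserializers above come in paired forms: the wrapped variant (e.g. awsRestxml_deserializeDocumentTransitionList) walks <member> children of a list element, while the *Unwrapped variant handles flattened XML where the element repeats directly under its parent. A minimal sketch of that flattened shape, using only the standard library encoding/xml rather than the smithy NodeDecoder; the struct here is a hypothetical stand-in for types.Transition, not the vendored type:

package main

import (
	"encoding/xml"
	"fmt"
)

// Illustrative stand-in for types.Transition.
type transition struct {
	Date         string `xml:"Date"`
	Days         int32  `xml:"Days"`
	StorageClass string `xml:"StorageClass"`
}

func main() {
	// The flattened ("unwrapped") form: each <Transition> repeats directly
	// under <Rule> with no <member> wrapper, which is why the generated
	// code needs both the List and ListUnwrapped variants.
	payload := `<Rule>
	  <Transition><Days>30</Days><StorageClass>GLACIER</StorageClass></Transition>
	  <Transition><Days>90</Days><StorageClass>DEEP_ARCHIVE</StorageClass></Transition>
	</Rule>`

	var rule struct {
		Transitions []transition `xml:"Transition"`
	}
	if err := xml.Unmarshal([]byte(payload), &rule); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", rule.Transitions)
}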
+package s3 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go new file mode 100644 index 000000000..bb5f4cf47 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go @@ -0,0 +1,126 @@ +package s3 + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + smithyauth "github.com/aws/smithy-go/auth" +) + +type endpointAuthResolver struct { + EndpointResolver EndpointResolverV2 +} + +var _ AuthSchemeResolver = (*endpointAuthResolver)(nil) + +func (r *endpointAuthResolver) ResolveAuthSchemes( + ctx context.Context, params *AuthResolverParameters, +) ( + []*smithyauth.Option, error, +) { + if params.endpointParams.Region == nil { + // #2502: We're correcting the endpoint binding behavior to treat empty + // Region as "unset" (nil), but auth resolution technically doesn't + // care and someone could be using V1 or non-default V2 endpoint + // resolution, both of which would bypass the required-region check. + // They shouldn't be broken because the region is technically required + // by this service's endpoint-based auth resolver, so we stub it here. + params.endpointParams.Region = aws.String("") + } + + opts, err := r.resolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + // canonicalize sigv4-s3express ID + for _, opt := range opts { + if opt.SchemeID == "sigv4-s3express" { + opt.SchemeID = "com.amazonaws.s3#sigv4express" + } + } + + // preserve pre-SRA behavior where everything technically had anonymous + return append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }), nil +} + +func (r *endpointAuthResolver) resolveAuthSchemes( + ctx context.Context, params *AuthResolverParameters, +) ( + []*smithyauth.Option, error, +) { + baseOpts, err := (&defaultAuthSchemeResolver{}).ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, fmt.Errorf("get base options: %w", err) + } + + endpt, err := r.EndpointResolver.ResolveEndpoint(ctx, *params.endpointParams) + if err != nil { + return nil, fmt.Errorf("resolve endpoint: %w", err) + } + + endptOpts, ok := smithyauth.GetAuthOptions(&endpt.Properties) + if !ok { + return baseOpts, nil + } + + // the list of options from the endpoint is authoritative, however, the + // modeled options have some properties that the endpoint ones don't, so we + // start from the latter and merge in + for _, endptOpt := range endptOpts { + if baseOpt := findScheme(baseOpts, endptOpt.SchemeID); baseOpt != nil { + rebaseProps(endptOpt, baseOpt) + } + } + + return endptOpts, nil +} + +// rebase the properties of dst, taking src as the base and overlaying those +// from dst +func rebaseProps(dst, src *smithyauth.Option) { + iprops, sprops := src.IdentityProperties, src.SignerProperties + + iprops.SetAll(&dst.IdentityProperties) + sprops.SetAll(&dst.SignerProperties) + + dst.IdentityProperties = iprops + dst.SignerProperties = sprops +} + +func findScheme(opts []*smithyauth.Option, schemeID string) *smithyauth.Option { + for _, opt := range opts { + if opt.SchemeID == schemeID { + return opt + } + } + return nil +} + +func finalizeServiceEndpointAuthResolver(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &endpointAuthResolver{ + EndpointResolver: options.EndpointResolverV2, + } +} + +func finalizeOperationEndpointAuthResolver(options *Options) { + resolver, ok := 
options.AuthSchemeResolver.(*endpointAuthResolver) + if !ok { + return + } + + if resolver.EndpointResolver == options.EndpointResolverV2 { + return + } + + options.AuthSchemeResolver = &endpointAuthResolver{ + EndpointResolver: options.EndpointResolverV2, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go new file mode 100644 index 000000000..e6ae2e872 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go @@ -0,0 +1,5829 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/endpoints/private/rulesfn" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "net/url" + "os" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom.You can provide functional options to configure endpoint +// values for the resolved endpoint. 
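endpoint_auth_resolver.go above merges endpoint-derived auth options over the modeled defaults (rebaseProps) and canonicalizes the sigv4-s3express scheme ID, while endpoints.go layers the legacy v1 resolver path on top of EndpointResolverV2. For a caller such as mapt, neither resolver interface needs to be implemented to target a custom endpoint. A minimal sketch, assuming a hypothetical local S3-compatible endpoint on port 9000:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// BaseEndpoint is the current way to point the client at a custom
	// endpoint; the legacy ResolveEndpoint middleware above only applies
	// to v1-style resolvers such as EndpointResolverFromURL.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String("http://localhost:9000") // hypothetical endpoint
		o.UsePathStyle = true                                // common for S3-compatible stores
	})
	_ = client
}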
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "s3" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + + if options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { + if options.UseDualstack { + options.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateEnabled + } else { + options.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateDisabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_S3") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "S3", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +func bindRegion(region string) *string { + if region == "" { + return nil + } + return aws.String(endpoints.MapFIPSRegion(region)) +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The S3 bucket used to send the request. This is an optional parameter that will + // be set automatically for operations that are scoped to an S3 bucket. + // + // Parameter + // is required. + Bucket *string + + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. + // + // AWS::UseFIPS + UseFIPS *bool + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. 
+ // + // AWS::UseDualStack + UseDualStack *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string + + // When true, force a path-style endpoint to be used where the bucket name is part + // of the path. + // + // Defaults to false if no value is + // provided. + // + // AWS::S3::ForcePathStyle + ForcePathStyle *bool + + // When true, use S3 Accelerate. NOTE: Not all regions support S3 + // accelerate. + // + // Defaults to false if no value is provided. + // + // AWS::S3::Accelerate + Accelerate *bool + + // Whether the global endpoint should be used, rather then the regional endpoint + // for us-east-1. + // + // Defaults to false if no value is + // provided. + // + // AWS::S3::UseGlobalEndpoint + UseGlobalEndpoint *bool + + // Internal parameter to use object lambda endpoint for an operation (eg: + // WriteGetObjectResponse) + // + // Parameter is required. + UseObjectLambdaEndpoint *bool + + // The S3 Key used to send the request. This is an optional parameter that will be + // set automatically for operations that are scoped to an S3 Key. + // + // Parameter is + // required. + Key *string + + // The S3 Prefix used to send the request. This is an optional parameter that will + // be set automatically for operations that are scoped to an S3 Prefix. + // + // Parameter + // is required. + Prefix *string + + // Internal parameter to disable Access Point Buckets + // + // Parameter is required. + DisableAccessPoints *bool + + // Whether multi-region access points (MRAP) should be disabled. + // + // Defaults to false + // if no value is provided. + // + // AWS::S3::DisableMultiRegionAccessPoints + DisableMultiRegionAccessPoints *bool + + // When an Access Point ARN is provided and this flag is enabled, the SDK MUST use + // the ARN's region when constructing the endpoint instead of the client's + // configured region. + // + // Parameter is required. + // + // AWS::S3::UseArnRegion + UseArnRegion *bool + + // Internal parameter to indicate whether S3Express operation should use control + // plane, (ex. CreateBucket) + // + // Parameter is required. + UseS3ExpressControlEndpoint *bool + + // Parameter to indicate whether S3Express session auth should be + // disabled + // + // Parameter is required. + DisableS3ExpressSessionAuth *bool +} + +// ValidateRequired validates required parameters are set. +func (p EndpointParameters) ValidateRequired() error { + if p.Accelerate == nil { + return fmt.Errorf("parameter Accelerate is required") + } + + if p.DisableMultiRegionAccessPoints == nil { + return fmt.Errorf("parameter DisableMultiRegionAccessPoints is required") + } + + if p.ForcePathStyle == nil { + return fmt.Errorf("parameter ForcePathStyle is required") + } + + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + if p.UseGlobalEndpoint == nil { + return fmt.Errorf("parameter UseGlobalEndpoint is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameterswith default values +// applied to members where applicable. 
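EndpointParameters is the full input to the rules engine; ValidateRequired and WithDefaults (defined next) guarantee the six boolean parameters are set before any rule is evaluated. A minimal sketch of driving the V2 resolver directly, supplying only Bucket and Region and letting WithDefaults fill in the rest; the bucket name and the expected URL in the comment are illustrative, not taken from the patch:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	params := s3.EndpointParameters{
		Bucket: aws.String("my-bucket"), // hypothetical bucket name
		Region: aws.String("us-east-1"),
	}
	// ResolveEndpoint applies WithDefaults and ValidateRequired internally.
	ep, err := s3.NewDefaultEndpointResolverV2().ResolveEndpoint(context.TODO(), params)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ep.URI.String()) // expected: https://my-bucket.s3.us-east-1.amazonaws.com
}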
+func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.Accelerate == nil { + p.Accelerate = ptr.Bool(false) + } + + if p.DisableMultiRegionAccessPoints == nil { + p.DisableMultiRegionAccessPoints = ptr.Bool(false) + } + + if p.ForcePathStyle == nil { + p.ForcePathStyle = ptr.Bool(false) + } + + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + + if p.UseGlobalEndpoint == nil { + p.UseGlobalEndpoint = ptr.Bool(false) + } + return p +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. +type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned. +func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseFIPS := *params.UseFIPS + _UseDualStack := *params.UseDualStack + _ForcePathStyle := *params.ForcePathStyle + _Accelerate := *params.Accelerate + _UseGlobalEndpoint := *params.UseGlobalEndpoint + _DisableMultiRegionAccessPoints := *params.DisableMultiRegionAccessPoints + + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if _Accelerate == true { + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Accelerate cannot be used with FIPS") + } + } + if _UseDualStack == true { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + return endpoint, fmt.Errorf("endpoint rule error, %s", "Cannot set dual-stack in combination with a custom endpoint.") + } + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with FIPS") + } + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with S3 Accelerate") + } + } + if _UseFIPS == true { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if _partitionResult.Name == "aws-cn" { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Partition does not support FIPS") + } + } + } + if exprVal := params.Bucket; exprVal != nil { + _Bucket := *exprVal + _ = _Bucket + if exprVal := rulesfn.SubString(_Bucket, 0, 6, true); exprVal != nil { + _bucketSuffix := *exprVal + _ = _bucketSuffix + if _bucketSuffix == "--x-s3" { + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support Dual-stack.") + } + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support 
S3 Accelerate.") + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { + _DisableS3ExpressSessionAuth := *exprVal + _ = _DisableS3ExpressSessionAuth + if _DisableS3ExpressSessionAuth == true { + if _url.IsIp == true { + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_Bucket) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + if _url.IsIp == true { + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + 
smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_Bucket) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + if exprVal := params.UseS3ExpressControlEndpoint; exprVal != nil { + _UseS3ExpressControlEndpoint := *exprVal + _ = _UseS3ExpressControlEndpoint + if _UseS3ExpressControlEndpoint == true { + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + if !(params.Endpoint != nil) { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control-fips.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, 
"s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { + _DisableS3ExpressSessionAuth := *exprVal + _ = _DisableS3ExpressSessionAuth + if _DisableS3ExpressSessionAuth == true { + if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + 
out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := 
func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, 
fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + } + if !(params.Bucket != nil) { + if exprVal := params.UseS3ExpressControlEndpoint; exprVal != nil { + _UseS3ExpressControlEndpoint := *exprVal + _ = _UseS3ExpressControlEndpoint + if _UseS3ExpressControlEndpoint == true { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control-fips.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := params.Bucket; exprVal != nil { + _Bucket := *exprVal + _ = 
_Bucket + if exprVal := rulesfn.SubString(_Bucket, 49, 50, true); exprVal != nil { + _hardwareType := *exprVal + _ = _hardwareType + if exprVal := rulesfn.SubString(_Bucket, 8, 12, true); exprVal != nil { + _regionPrefix := *exprVal + _ = _regionPrefix + if exprVal := rulesfn.SubString(_Bucket, 0, 7, true); exprVal != nil { + _bucketAliasSuffix := *exprVal + _ = _bucketAliasSuffix + if exprVal := rulesfn.SubString(_Bucket, 32, 49, true); exprVal != nil { + _outpostId := *exprVal + _ = _outpostId + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _regionPartition := *exprVal + _ = _regionPartition + if _bucketAliasSuffix == "--op-s3" { + if rulesfn.IsValidHostLabel(_outpostId, false) { + if _hardwareType == "e" { + if _regionPrefix == "beta" { + if !(params.Endpoint != nil) { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Expected a endpoint to be specified but no endpoint was found") + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".ec2.") + out.WriteString(_url.Authority) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".ec2.s3-outposts.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_regionPartition.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _hardwareType == "o" { + if _regionPrefix == "beta" { + if !(params.Endpoint != nil) { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Expected a endpoint to be specified but no endpoint was found") + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".op-") + out.WriteString(_outpostId) + out.WriteString(".") + out.WriteString(_url.Authority) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".op-") + out.WriteString(_outpostId) + out.WriteString(".s3-outposts.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_regionPartition.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Unrecognized hardware type: \"Expected hardware type o or e but got ") + out.WriteString(_hardwareType) + out.WriteString("\"") + return out.String() + }()) + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`.") + } + } + } + } + } + } + } + if exprVal := params.Bucket; exprVal != nil { + _Bucket := *exprVal + _ = _Bucket + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if !(rulesfn.ParseURL(_Endpoint) != nil) { + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Custom endpoint `") + out.WriteString(_Endpoint) + out.WriteString("` was not a valid URI") + return out.String() + }()) + } + } + if _ForcePathStyle == false { + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if rulesfn.IsValidHostLabel(_Region, false) { + if _Accelerate == true { + if _partitionResult.Name == "aws-cn" { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Accelerate cannot be used in this region") + } + } + if _UseDualStack == true { + if _UseFIPS == true { + if _Accelerate == false { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3-fips.dualstack.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() 
[vendored, generated code — github.com/aws/aws-sdk-go-v2/service/s3 endpoint resolver: virtual-hosted bucket rules for every UseFIPS × UseDualStack × Accelerate × UseGlobalEndpoint × custom-Endpoint combination. Each branch assembles the bucket hostname and attaches SigV4 signing properties (service "s3", double URI-encoding disabled); "aws-global" is pinned to the us-east-1 endpoint and signing region. One representative rule, restored to its original line structure:]

+	if _UseDualStack == true {
+		if _UseFIPS == true {
+			if _Accelerate == false {
+				if !(params.Endpoint != nil) {
+					if !(_Region == "aws-global") {
+						if _UseGlobalEndpoint == true {
+							uriString := func() string {
+								var out strings.Builder
+								out.WriteString("https://")
+								out.WriteString(_Bucket)
+								out.WriteString(".s3-fips.dualstack.")
+								out.WriteString(_Region)
+								out.WriteString(".")
+								out.WriteString(_partitionResult.DnsSuffix)
+								return out.String()
+							}()
+
+							uri, err := url.Parse(uriString)
+							if err != nil {
+								return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+							}
+
+							return smithyendpoints.Endpoint{
+								URI:     *uri,
+								Headers: http.Header{},
+								Properties: func() smithy.Properties {
+									var out smithy.Properties
+									smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+										{
+											SchemeID: "aws.auth#sigv4",
+											SignerProperties: func() smithy.Properties {
+												var sp smithy.Properties
+												smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+												smithyhttp.SetSigV4SigningName(&sp, "s3")
+												smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+												smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+												return sp
+											}(),
+										},
+									})
+									return out
+								}(),
+							}, nil
+						}
+					}
+				}
+			}
+		}
+	}

[the remaining combinations follow the same shape, varying only the hostname infix — ".s3-fips.", ".s3-fips.dualstack.", ".s3-accelerate.", ".s3-accelerate.dualstack.", ".s3.dualstack.", ".s3." — plus the custom-Endpoint branches, which splice the bucket into the caller-supplied URL (as a leading host label when the authority is a DNS name, as a path segment when it is an IP), and the terminal "Invalid region: region was not a valid DNS name." / "Endpoint resolution failed. Invalid operation or environment input." error returns.]
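The flag combinations these branches enumerate surface on the client as s3.Options / EndpointParameters. A minimal sketch of calling the generated resolver directly to see which hostname a combination produces — the bucket name is hypothetical, and it assumes the EndpointParameters / WithDefaults surface generated for s3 v1.53.1:

    package main

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        // Ask the generated resolver which endpoint a FIPS + dual-stack
        // virtual-hosted request would use.
        resolver := s3.NewDefaultEndpointResolverV2()
        params := s3.EndpointParameters{
            Bucket:       aws.String("my-bucket"), // hypothetical bucket
            Region:       aws.String("eu-west-1"),
            UseFIPS:      aws.Bool(true),
            UseDualStack: aws.Bool(true),
        }.WithDefaults() // fills Accelerate, ForcePathStyle, etc. with their defaults
        ep, err := resolver.ResolveEndpoint(context.Background(), params)
        if err != nil {
            panic(err)
        }
        fmt.Println(ep.URI.String())
        // should print https://my-bucket.s3-fips.dualstack.eu-west-1.amazonaws.com,
        // matching the ".s3-fips.dualstack." branch above
    }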
[vendored, generated code, continued — an http-scheme custom-endpoint rule for virtual-hostable buckets, then the S3 Object Lambda branch: when Bucket parses as an "s3-object-lambda" accesspoint ARN, the resolver rejects dual-stack and Accelerate, honors DisableAccessPoints and UseArnRegion, cross-checks the ARN partition against the client partition, validates the region, account id, and access point name as host labels, and returns "<accessPointName>-<accountId>.s3-object-lambda[-fips].<arnRegion>.<dnsSuffix>" (or the bucket spliced into a caller-supplied endpoint), signed for service "s3-object-lambda" in the ARN's region.]
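The earlier sketch can be pointed at these Object Lambda rules by passing an access point ARN as the bucket; the ARN below is hypothetical:

    // Reusing the resolver and imports from the sketch above.
    params := s3.EndpointParameters{
        Bucket: aws.String("arn:aws:s3-object-lambda:us-east-1:123456789012:accesspoint/my-olap"), // hypothetical ARN
        Region: aws.String("us-east-1"),
    }.WithDefaults()
    // Resolves to https://my-olap-123456789012.s3-object-lambda.us-east-1.amazonaws.com,
    // with the SigV4 signing name switched to "s3-object-lambda".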
[vendored, generated code, continued — the Object Lambda error returns (non-"accesspoint" ARN types, missing access point name, malformed account id / region, partition mismatch), then the plain S3 access point branch: "accesspoint" ARNs for service "s3" reject Accelerate, honor DisableAccessPoints and UseArnRegion, and resolve to "<accessPointName>-<accountId>.s3-accesspoint[-fips][.dualstack].<arnRegion>.<dnsSuffix>" (or a caller-supplied endpoint), signed for "s3" in the ARN's region; malformed ARNs fall through to the matching "Invalid ARN: …" errors.]
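In normal use none of this is invoked by hand — passing an access point ARN anywhere a bucket name is accepted is enough for the resolver to pick these branches. A hedged sketch, continuing the setup above (ARN and key are hypothetical; cfg comes from config.LoadDefaultConfig):

    client := s3.NewFromConfig(cfg)
    out, err := client.GetObject(context.Background(), &s3.GetObjectInput{
        Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"), // hypothetical ARN
        Key:    aws.String("state/checkpoint.json"),                               // hypothetical key
    })
    // The request is sent to
    // https://my-ap-123456789012.s3-accesspoint.us-west-2.amazonaws.com per the rule above.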
[vendored, generated code, continued — the Multi-Region Access Point branch: a host-label access point name in an ARN with no region resolves to "<accessPointName>.accesspoint.s3-global.<dnsSuffix>"; dual-stack, FIPS, and Accelerate are rejected, DisableMultiRegionAccessPoints is honored, the client and bucket partitions must match, and the endpoint is signed with SigV4A ("aws.auth#sigv4a") over the region set {"*"}.]
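Note the scheme switch here: MRAPs are the one branch that emits only "aws.auth#sigv4a", which is why the SDK pulls in its internal/v4a signer. Continuing the sketch (the MRAP alias is hypothetical):

    params := s3.EndpointParameters{
        Bucket: aws.String("arn:aws:s3::123456789012:accesspoint/mfzwi23gnjvgw.mrap"), // hypothetical alias
        Region: aws.String("us-east-1"),
    }.WithDefaults()
    // Resolves to https://mfzwi23gnjvgw.mrap.accesspoint.s3-global.amazonaws.com,
    // signed with SigV4A over the region set {"*"}.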
[vendored, generated code, continued — the S3 Outposts branch: "s3-outposts" ARNs reject dual-stack, FIPS, Accelerate, and sub-resources, validate the outpost id, account id, and region as host labels, honor UseArnRegion, require partition agreement and an "accesspoint" outpost type, and resolve to "<accessPointName>-<accountId>.<outpostId>.s3-outposts.<arnRegion>.<dnsSuffix>" (or the caller-supplied endpoint's authority), offering both SigV4A (regions {"*"}) and SigV4 (ARN region) auth options for service "s3-outposts".]
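And the Outposts flavor, for completeness (hypothetical ARN):

    params := s3.EndpointParameters{
        Bucket: aws.String("arn:aws:s3-outposts:us-east-1:123456789012:outpost/op-01234567890123456/accesspoint/my-ap"), // hypothetical ARN
        Region: aws.String("us-east-1"),
    }.WithDefaults()
    // Resolves to
    // https://my-ap-123456789012.op-01234567890123456.s3-outposts.us-east-1.amazonaws.com,
    // with both SigV4A ({"*"}) and SigV4 (ARN region) offered for service "s3-outposts".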
[vendored, generated code, continued — the remaining Outposts error returns ("Invalid ARN: The outpost Id may only contain a-z, A-Z, 0-9 and `-`", "The Outpost Id was not set", "Unrecognized format", "No ARN type specified"), a sanity check that anything starting with "arn:" actually parses as an ARN, rejection of ForcePathStyle combined with ARN buckets, and then the path-style rules: the URI-encoded bucket is appended as a path segment to "s3[-fips][.dualstack].<region>.<dnsSuffix>" for the same UseFIPS × UseDualStack × UseGlobalEndpoint combinations, with "aws-global" again pinned to the us-east-1 endpoint and signing region.]
sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if _UseFIPS == false { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if _UseFIPS == false { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.NormalizedPath) + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if _UseFIPS == false { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + if _Region == "us-east-1" { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.NormalizedPath) + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + 
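// The ladder of branches here enumerates every combination of UseFIPS,
// UseDualStack, and UseGlobalEndpoint for path-style requests. A minimal
// sketch of client configuration that would land in one of the
// "s3-fips.dualstack" branches (illustrative only; the region and bucket
// names are hypothetical):
//
//	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-west-2"))
//	if err != nil {
//		return err
//	}
//	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
//		o.UsePathStyle = true
//		o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
//		o.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateEnabled
//	})
//	// A GetObject against bucket "logs" then resolves to
//	// https://s3-fips.dualstack.us-west-2.amazonaws.com/logs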
smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.NormalizedPath) + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if _UseFIPS == false { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.NormalizedPath) + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if _UseFIPS == false { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if 
_UseFIPS == false { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + if _Region == "us-east-1" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if _UseFIPS == false { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Path-style addressing cannot be used with S3 Accelerate") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + if exprVal := params.UseObjectLambdaEndpoint; exprVal != nil { + _UseObjectLambdaEndpoint := *exprVal + _ = _UseObjectLambdaEndpoint + if _UseObjectLambdaEndpoint == true { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if rulesfn.IsValidHostLabel(_Region, true) { + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support Dual-stack") + } + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support S3 Accelerate") + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") + smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-object-lambda-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") + smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-object-lambda.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") + 
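// When UseObjectLambdaEndpoint is set, the rules above swap the signing name
// from "s3" to "s3-object-lambda" and resolve
// s3-object-lambda[-fips].<region>.<dnsSuffix> hosts, after rejecting
// dual-stack and accelerate up front since Object Lambda supports neither.
// This parameter appears to be bound internally by the WriteGetObjectResponse
// operation rather than set by callers directly.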
smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + } + if !(params.Bucket != nil) { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if rulesfn.IsValidHostLabel(_Region, true) { + if _UseFIPS == true { + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.dualstack.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseFIPS == true { + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == true { + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, 
true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == true { + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseFIPS == true { + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == true { + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out 
strings.Builder + out.WriteString("https://s3.dualstack.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return 
endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + if _Region == "us-east-1" { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out 
smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + if _Region == "us-east-1" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == 
false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "A region must be set when sending requests to S3.") +} + +type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(input interface{}, options Options) *EndpointParameters { + params := &EndpointParameters{} + + params.Region = bindRegion(options.Region) + params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint + params.ForcePathStyle = aws.Bool(options.UsePathStyle) + params.Accelerate = aws.Bool(options.UseAccelerate) + params.DisableMultiRegionAccessPoints = aws.Bool(options.DisableMultiRegionAccessPoints) + params.UseArnRegion = aws.Bool(options.UseARNRegion) + + params.DisableS3ExpressSessionAuth = options.DisableS3ExpressSessionAuth + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.options.EndpointResolverV2 == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := bindEndpointParams(getOperationInput(ctx), m.options) + endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { + endpt.URI.RawPath = endpt.URI.Path + } + req.URL.Scheme = endpt.URI.Scheme + req.URL.Host = 
endpt.URI.Host + req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) + req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) + for k := range endpt.Headers { + req.Header.Set(k, endpt.Headers.Get(k)) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) + for _, o := range opts { + rscheme.SignerProperties.SetAll(&o.SignerProperties) + } + + backend := s3cust.GetPropertiesBackend(&endpt.Properties) + ctx = internalcontext.SetS3Backend(ctx, backend) + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go new file mode 100644 index 000000000..d6cdb5337 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go @@ -0,0 +1,285 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream" + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" + smithysync "github.com/aws/smithy-go/sync" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "sync" +) + +// SelectObjectContentEventStreamReader provides the interface for reading events +// from a stream. +// +// The writer's Close method must allow multiple concurrent calls. +type SelectObjectContentEventStreamReader interface { + Events() <-chan types.SelectObjectContentEventStream + Close() error + Err() error +} + +type selectObjectContentEventStreamReader struct { + stream chan types.SelectObjectContentEventStream + decoder *eventstream.Decoder + eventStream io.ReadCloser + err *smithysync.OnceErr + payloadBuf []byte + done chan struct{} + closeOnce sync.Once +} + +func newSelectObjectContentEventStreamReader(readCloser io.ReadCloser, decoder *eventstream.Decoder) *selectObjectContentEventStreamReader { + w := &selectObjectContentEventStreamReader{ + stream: make(chan types.SelectObjectContentEventStream), + decoder: decoder, + eventStream: readCloser, + err: smithysync.NewOnceErr(), + done: make(chan struct{}), + payloadBuf: make([]byte, 10*1024), + } + + go w.readEventStream() + + return w +} + +func (r *selectObjectContentEventStreamReader) Events() <-chan types.SelectObjectContentEventStream { + return r.stream +} + +func (r *selectObjectContentEventStreamReader) readEventStream() { + defer r.Close() + defer close(r.stream) + + for { + r.payloadBuf = r.payloadBuf[0:0] + decodedMessage, err := r.decoder.Decode(r.eventStream, r.payloadBuf) + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + return + default: + r.err.SetError(err) + return + } + } + + event, err := r.deserializeEventMessage(&decodedMessage) + if err != nil { + r.err.SetError(err) + return + } + + select { + case r.stream <- event: + case <-r.done: + return + } + + } +} + +func (r *selectObjectContentEventStreamReader) deserializeEventMessage(msg *eventstream.Message) (types.SelectObjectContentEventStream, error) { + messageType := msg.Headers.Get(eventstreamapi.MessageTypeHeader) + if messageType == nil { + return nil, fmt.Errorf("%s event header not present", eventstreamapi.MessageTypeHeader) + } + + switch messageType.String() { + 
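// The :message-type header decides how each decoded frame is handled:
// "event" frames deserialize into one of the SelectObjectContentEventStream
// variants, "exception" frames become modeled service errors, "error" frames
// become generic API errors, and anything else surfaces as an
// UnknownEventMessageError (see the cases below). A consumer-side sketch,
// assuming the SDK's usual GetStream accessor on the operation output and the
// Records event variant; processRecords is a hypothetical placeholder:
//
//	out, err := client.SelectObjectContent(ctx, input)
//	if err != nil {
//		return err
//	}
//	stream := out.GetStream()
//	defer stream.Close()
//	for ev := range stream.Events() {
//		if rec, ok := ev.(*types.SelectObjectContentEventStreamMemberRecords); ok {
//			processRecords(rec.Value.Payload) // Payload holds the scanned bytes
//		}
//	}
//	return stream.Err()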
case eventstreamapi.EventMessageType: + var v types.SelectObjectContentEventStream + if err := awsRestxml_deserializeEventStreamSelectObjectContentEventStream(&v, msg); err != nil { + return nil, err + } + return v, nil + + case eventstreamapi.ExceptionMessageType: + return nil, awsRestxml_deserializeEventStreamExceptionSelectObjectContentEventStream(msg) + + case eventstreamapi.ErrorMessageType: + errorCode := "UnknownError" + errorMessage := errorCode + if header := msg.Headers.Get(eventstreamapi.ErrorCodeHeader); header != nil { + errorCode = header.String() + } + if header := msg.Headers.Get(eventstreamapi.ErrorMessageHeader); header != nil { + errorMessage = header.String() + } + return nil, &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + + default: + mc := msg.Clone() + return nil, &UnknownEventMessageError{ + Type: messageType.String(), + Message: &mc, + } + + } +} + +func (r *selectObjectContentEventStreamReader) ErrorSet() <-chan struct{} { + return r.err.ErrorSet() +} + +func (r *selectObjectContentEventStreamReader) Close() error { + r.closeOnce.Do(r.safeClose) + return r.Err() +} + +func (r *selectObjectContentEventStreamReader) safeClose() { + close(r.done) + r.eventStream.Close() + +} + +func (r *selectObjectContentEventStreamReader) Err() error { + return r.err.Err() +} + +func (r *selectObjectContentEventStreamReader) Closed() <-chan struct{} { + return r.done +} + +type awsRestxml_deserializeOpEventStreamSelectObjectContent struct { + LogEventStreamWrites bool + LogEventStreamReads bool +} + +func (*awsRestxml_deserializeOpEventStreamSelectObjectContent) ID() string { + return "OperationEventStreamDeserializer" +} + +func (m *awsRestxml_deserializeOpEventStreamSelectObjectContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + defer func() { + if err == nil { + return + } + m.closeResponseBody(out) + }() + + logger := middleware.GetLogger(ctx) + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request) + } + _ = request + + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + deserializeOutput, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse) + } + _ = deserializeOutput + + output, ok := out.Result.(*SelectObjectContentOutput) + if out.Result != nil && !ok { + return out, metadata, fmt.Errorf("unexpected output result type: %T", out.Result) + } else if out.Result == nil { + output = &SelectObjectContentOutput{} + out.Result = output + } + + eventReader := newSelectObjectContentEventStreamReader( + deserializeOutput.Body, + eventstream.NewDecoder(func(options *eventstream.DecoderOptions) { + options.Logger = logger + options.LogMessages = m.LogEventStreamReads + + }), + ) + defer func() { + if err == nil { + return + } + _ = eventReader.Close() + }() + + output.eventStream = NewSelectObjectContentEventStream(func(stream *SelectObjectContentEventStream) { + stream.Reader = eventReader + }) + + go output.eventStream.waitStreamClose() + + return out, metadata, nil +} + +func (*awsRestxml_deserializeOpEventStreamSelectObjectContent) closeResponseBody(out middleware.DeserializeOutput) { + if resp, ok := out.RawResponse.(*smithyhttp.Response); ok && resp != nil && resp.Body != nil { + _, _ = 
io.Copy(ioutil.Discard, resp.Body) + _ = resp.Body.Close() + } +} + +func addEventStreamSelectObjectContentMiddleware(stack *middleware.Stack, options Options) error { + if err := stack.Deserialize.Insert(&awsRestxml_deserializeOpEventStreamSelectObjectContent{ + LogEventStreamWrites: options.ClientLogMode.IsRequestEventMessage(), + LogEventStreamReads: options.ClientLogMode.IsResponseEventMessage(), + }, "OperationDeserializer", middleware.Before); err != nil { + return err + } + return nil + +} + +// UnknownEventMessageError provides an error when a message is received from the stream, +// but the reader is unable to determine what kind of message it is. +type UnknownEventMessageError struct { + Type string + Message *eventstream.Message +} + +// Error returns the error message string. +func (e *UnknownEventMessageError) Error() string { + return "unknown event stream message type, " + e.Type +} + +func setSafeEventStreamClientLogMode(o *Options, operation string) { + switch operation { + case "SelectObjectContent": + toggleEventStreamClientLogMode(o, false, true) + return + + default: + return + + } +} +func toggleEventStreamClientLogMode(o *Options, request, response bool) { + mode := o.ClientLogMode + + if request && mode.IsRequestWithBody() { + mode.ClearRequestWithBody() + mode |= aws.LogRequest + } + + if response && mode.IsResponseWithBody() { + mode.ClearResponseWithBody() + mode |= aws.LogResponse + } + + o.ClientLogMode = mode + +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go new file mode 100644 index 000000000..bbac9ca27 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go @@ -0,0 +1,9 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" +) + +// ExpressCredentialsProvider retrieves credentials for operations against the +// S3Express storage class. +type ExpressCredentialsProvider = customizations.S3ExpressCredentialsProvider diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go new file mode 100644 index 000000000..3b35a3e57 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go @@ -0,0 +1,170 @@ +package s3 + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "errors" + "fmt" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/aws-sdk-go-v2/internal/sync/singleflight" + "github.com/aws/smithy-go/container/private/cache" + "github.com/aws/smithy-go/container/private/cache/lru" +) + +const s3ExpressCacheCap = 100 + +const s3ExpressRefreshWindow = 1 * time.Minute + +type cacheKey struct { + CredentialsHash string // hmac(sigv4 akid, sigv4 secret) + Bucket string +} + +func (c cacheKey) Slug() string { + return fmt.Sprintf("%s%s", c.CredentialsHash, c.Bucket) +} + +type sessionCredsCache struct { + mu sync.Mutex + cache cache.Cache +} + +func (c *sessionCredsCache) Get(key cacheKey) (*aws.Credentials, bool) { + c.mu.Lock() + defer c.mu.Unlock() + + if v, ok := c.cache.Get(key); ok { + return v.(*aws.Credentials), true + } + return nil, false +} + +func (c *sessionCredsCache) Put(key cacheKey, creds *aws.Credentials) { + c.mu.Lock() + defer c.mu.Unlock() + + c.cache.Put(key, creds) +} + +// The default S3Express provider uses an LRU cache with a capacity of 100.
+// +// Credentials will be refreshed asynchronously when a Retrieve() call is made +// for cached credentials within an expiry window (1 minute, currently +// non-configurable). +type defaultS3ExpressCredentialsProvider struct { + sf singleflight.Group + + client createSessionAPIClient + cache *sessionCredsCache + refreshWindow time.Duration + v4creds aws.CredentialsProvider // underlying credentials used for CreateSession +} + +type createSessionAPIClient interface { + CreateSession(context.Context, *CreateSessionInput, ...func(*Options)) (*CreateSessionOutput, error) +} + +func newDefaultS3ExpressCredentialsProvider() *defaultS3ExpressCredentialsProvider { + return &defaultS3ExpressCredentialsProvider{ + cache: &sessionCredsCache{ + cache: lru.New(s3ExpressCacheCap), + }, + refreshWindow: s3ExpressRefreshWindow, + } +} + +// returns a cloned provider using new base credentials, used when per-op +// config mutations change the credentials provider +func (p *defaultS3ExpressCredentialsProvider) CloneWithBaseCredentials(v4creds aws.CredentialsProvider) *defaultS3ExpressCredentialsProvider { + return &defaultS3ExpressCredentialsProvider{ + client: p.client, + cache: p.cache, + refreshWindow: p.refreshWindow, + v4creds: v4creds, + } +} + +func (p *defaultS3ExpressCredentialsProvider) Retrieve(ctx context.Context, bucket string) (aws.Credentials, error) { + v4creds, err := p.v4creds.Retrieve(ctx) + if err != nil { + return aws.Credentials{}, fmt.Errorf("get sigv4 creds: %w", err) + } + + key := cacheKey{ + CredentialsHash: gethmac(v4creds.AccessKeyID, v4creds.SecretAccessKey), + Bucket: bucket, + } + creds, ok := p.cache.Get(key) + if !ok || creds.Expired() { + return p.awaitDoChanRetrieve(ctx, key) + } + + if creds.Expires.Sub(sdk.NowTime()) <= p.refreshWindow { + p.doChanRetrieve(ctx, key) + } + + return *creds, nil +} + +func (p *defaultS3ExpressCredentialsProvider) doChanRetrieve(ctx context.Context, key cacheKey) <-chan singleflight.Result { + return p.sf.DoChan(key.Slug(), func() (interface{}, error) { + return p.retrieve(ctx, key) + }) +} + +func (p *defaultS3ExpressCredentialsProvider) awaitDoChanRetrieve(ctx context.Context, key cacheKey) (aws.Credentials, error) { + ch := p.doChanRetrieve(ctx, key) + + select { + case r := <-ch: + return r.Val.(aws.Credentials), r.Err + case <-ctx.Done(): + return aws.Credentials{}, errors.New("s3express retrieve credentials canceled") + } +} + +func (p *defaultS3ExpressCredentialsProvider) retrieve(ctx context.Context, key cacheKey) (aws.Credentials, error) { + resp, err := p.client.CreateSession(ctx, &CreateSessionInput{ + Bucket: aws.String(key.Bucket), + }) + if err != nil { + return aws.Credentials{}, err + } + + creds, err := credentialsFromResponse(resp) + if err != nil { + return aws.Credentials{}, err + } + + p.cache.Put(key, creds) + return *creds, nil +} + +func credentialsFromResponse(o *CreateSessionOutput) (*aws.Credentials, error) { + if o.Credentials == nil { + return nil, errors.New("s3express session credentials unset") + } + + if o.Credentials.AccessKeyId == nil || o.Credentials.SecretAccessKey == nil || o.Credentials.SessionToken == nil || o.Credentials.Expiration == nil { + return nil, errors.New("s3express session credentials missing one or more required fields") + } + + return &aws.Credentials{ + AccessKeyID: *o.Credentials.AccessKeyId, + SecretAccessKey: *o.Credentials.SecretAccessKey, + SessionToken: *o.Credentials.SessionToken, + CanExpire: true, + Expires: *o.Credentials.Expiration, + }, nil +} + +func gethmac(p, key 
string) string { + hash := hmac.New(sha256.New, []byte(key)) + hash.Write([]byte(p)) + return string(hash.Sum(nil)) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go new file mode 100644 index 000000000..7c7a7b424 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go @@ -0,0 +1,39 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" +) + +// If the caller hasn't provided an S3Express provider, we use our default +// which will grab a reference to the S3 client itself in finalization. +func resolveExpressCredentials(o *Options) { + if o.ExpressCredentials == nil { + o.ExpressCredentials = newDefaultS3ExpressCredentialsProvider() + } +} + +// Config finalizer: if we're using the default S3Express implementation, grab +// a reference to the client for its CreateSession API, and the underlying +// sigv4 credentials provider for cache keying. +func finalizeExpressCredentials(o *Options, c *Client) { + if p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider); ok { + p.client = c + p.v4creds = o.Credentials + } +} + +// Operation config finalizer: update the sigv4 credentials on the default +// express provider in case it changed to ensure different cache keys +func finalizeOperationExpressCredentials(o *Options, c Client) { + if p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider); ok { + o.ExpressCredentials = p.CloneWithBaseCredentials(o.Credentials) + } +} + +// NewFromConfig resolver: pull from opaque sources if it exists. +func resolveDisableExpressAuth(cfg aws.Config, o *Options) { + if v, ok := customizations.ResolveDisableExpressAuth(cfg.ConfigSources); ok { + o.DisableS3ExpressSessionAuth = &v + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json new file mode 100644 index 000000000..6e392285e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json @@ -0,0 +1,135 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/v4a": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", + "github.com/aws/aws-sdk-go-v2/service/internal/checksum": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", + "github.com/aws/aws-sdk-go-v2/service/internal/s3shared": "v1.2.3", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_AbortMultipartUpload.go", + "api_op_CompleteMultipartUpload.go", + "api_op_CopyObject.go", + "api_op_CreateBucket.go", + "api_op_CreateMultipartUpload.go", + "api_op_CreateSession.go", + "api_op_DeleteBucket.go", + "api_op_DeleteBucketAnalyticsConfiguration.go", + "api_op_DeleteBucketCors.go", + "api_op_DeleteBucketEncryption.go", + "api_op_DeleteBucketIntelligentTieringConfiguration.go", + "api_op_DeleteBucketInventoryConfiguration.go", + "api_op_DeleteBucketLifecycle.go", + "api_op_DeleteBucketMetricsConfiguration.go", 
+ "api_op_DeleteBucketOwnershipControls.go", + "api_op_DeleteBucketPolicy.go", + "api_op_DeleteBucketReplication.go", + "api_op_DeleteBucketTagging.go", + "api_op_DeleteBucketWebsite.go", + "api_op_DeleteObject.go", + "api_op_DeleteObjectTagging.go", + "api_op_DeleteObjects.go", + "api_op_DeletePublicAccessBlock.go", + "api_op_GetBucketAccelerateConfiguration.go", + "api_op_GetBucketAcl.go", + "api_op_GetBucketAnalyticsConfiguration.go", + "api_op_GetBucketCors.go", + "api_op_GetBucketEncryption.go", + "api_op_GetBucketIntelligentTieringConfiguration.go", + "api_op_GetBucketInventoryConfiguration.go", + "api_op_GetBucketLifecycleConfiguration.go", + "api_op_GetBucketLocation.go", + "api_op_GetBucketLogging.go", + "api_op_GetBucketMetricsConfiguration.go", + "api_op_GetBucketNotificationConfiguration.go", + "api_op_GetBucketOwnershipControls.go", + "api_op_GetBucketPolicy.go", + "api_op_GetBucketPolicyStatus.go", + "api_op_GetBucketReplication.go", + "api_op_GetBucketRequestPayment.go", + "api_op_GetBucketTagging.go", + "api_op_GetBucketVersioning.go", + "api_op_GetBucketWebsite.go", + "api_op_GetObject.go", + "api_op_GetObjectAcl.go", + "api_op_GetObjectAttributes.go", + "api_op_GetObjectLegalHold.go", + "api_op_GetObjectLockConfiguration.go", + "api_op_GetObjectRetention.go", + "api_op_GetObjectTagging.go", + "api_op_GetObjectTorrent.go", + "api_op_GetPublicAccessBlock.go", + "api_op_HeadBucket.go", + "api_op_HeadObject.go", + "api_op_ListBucketAnalyticsConfigurations.go", + "api_op_ListBucketIntelligentTieringConfigurations.go", + "api_op_ListBucketInventoryConfigurations.go", + "api_op_ListBucketMetricsConfigurations.go", + "api_op_ListBuckets.go", + "api_op_ListDirectoryBuckets.go", + "api_op_ListMultipartUploads.go", + "api_op_ListObjectVersions.go", + "api_op_ListObjects.go", + "api_op_ListObjectsV2.go", + "api_op_ListParts.go", + "api_op_PutBucketAccelerateConfiguration.go", + "api_op_PutBucketAcl.go", + "api_op_PutBucketAnalyticsConfiguration.go", + "api_op_PutBucketCors.go", + "api_op_PutBucketEncryption.go", + "api_op_PutBucketIntelligentTieringConfiguration.go", + "api_op_PutBucketInventoryConfiguration.go", + "api_op_PutBucketLifecycleConfiguration.go", + "api_op_PutBucketLogging.go", + "api_op_PutBucketMetricsConfiguration.go", + "api_op_PutBucketNotificationConfiguration.go", + "api_op_PutBucketOwnershipControls.go", + "api_op_PutBucketPolicy.go", + "api_op_PutBucketReplication.go", + "api_op_PutBucketRequestPayment.go", + "api_op_PutBucketTagging.go", + "api_op_PutBucketVersioning.go", + "api_op_PutBucketWebsite.go", + "api_op_PutObject.go", + "api_op_PutObjectAcl.go", + "api_op_PutObjectLegalHold.go", + "api_op_PutObjectLockConfiguration.go", + "api_op_PutObjectRetention.go", + "api_op_PutObjectTagging.go", + "api_op_PutPublicAccessBlock.go", + "api_op_RestoreObject.go", + "api_op_SelectObjectContent.go", + "api_op_UploadPart.go", + "api_op_UploadPartCopy.go", + "api_op_WriteGetObjectResponse.go", + "auth.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", + "eventstream.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "options.go", + "protocol_test.go", + "serializers.go", + "snapshot_test.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "types/types_exported_test.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/s3", + "unstable": false +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go new file mode 100644 index 000000000..04c6fdbb3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package s3 + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.53.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go new file mode 100644 index 000000000..6aae79e7c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go @@ -0,0 +1,214 @@ +package s3 + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// ListObjectVersionsAPIClient is a client that implements the ListObjectVersions +// operation +type ListObjectVersionsAPIClient interface { + ListObjectVersions(context.Context, *ListObjectVersionsInput, ...func(*Options)) (*ListObjectVersionsOutput, error) +} + +var _ ListObjectVersionsAPIClient = (*Client)(nil) + +// ListObjectVersionsPaginatorOptions is the paginator options for ListObjectVersions +type ListObjectVersionsPaginatorOptions struct { + // (Optional) The maximum number of Object Versions that you want Amazon S3 to + // return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListObjectVersionsPaginator is a paginator for ListObjectVersions +type ListObjectVersionsPaginator struct { + options ListObjectVersionsPaginatorOptions + client ListObjectVersionsAPIClient + params *ListObjectVersionsInput + firstPage bool + keyMarker *string + versionIDMarker *string + isTruncated bool +} + +// NewListObjectVersionsPaginator returns a new ListObjectVersionsPaginator +func NewListObjectVersionsPaginator(client ListObjectVersionsAPIClient, params *ListObjectVersionsInput, optFns ...func(*ListObjectVersionsPaginatorOptions)) *ListObjectVersionsPaginator { + if params == nil { + params = &ListObjectVersionsInput{} + } + + options := ListObjectVersionsPaginatorOptions{} + if params.MaxKeys != nil { + options.Limit = aws.ToInt32(params.MaxKeys) + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListObjectVersionsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + keyMarker: params.KeyMarker, + versionIDMarker: params.VersionIdMarker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListObjectVersionsPaginator) HasMorePages() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListObjectVersions page. +func (p *ListObjectVersionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.KeyMarker = p.keyMarker + params.VersionIdMarker = p.versionIDMarker + + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + if limit > 0 { + params.MaxKeys = aws.Int32(limit) + } + + result, err := p.client.ListObjectVersions(ctx, ¶ms, optFns...) 
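+    // Surface a failed page fetch before mutating any paginator state; the
+    // markers below advance only from a successful response.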
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.keyMarker + p.isTruncated = aws.ToBool(result.IsTruncated) + p.keyMarker = nil + p.versionIDMarker = nil + if aws.ToBool(result.IsTruncated) { + p.keyMarker = result.NextKeyMarker + p.versionIDMarker = result.NextVersionIdMarker + } + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.keyMarker != nil && + *prevToken == *p.keyMarker { + p.isTruncated = false + } + + return result, nil +} + +// ListMultipartUploadsAPIClient is a client that implements the ListMultipartUploads +// operation +type ListMultipartUploadsAPIClient interface { + ListMultipartUploads(context.Context, *ListMultipartUploadsInput, ...func(*Options)) (*ListMultipartUploadsOutput, error) +} + +var _ ListMultipartUploadsAPIClient = (*Client)(nil) + +// ListMultipartUploadsPaginatorOptions is the paginator options for ListMultipartUploads +type ListMultipartUploadsPaginatorOptions struct { + // (Optional) The maximum number of Multipart Uploads that you want Amazon S3 to + // return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListMultipartUploadsPaginator is a paginator for ListMultipartUploads +type ListMultipartUploadsPaginator struct { + options ListMultipartUploadsPaginatorOptions + client ListMultipartUploadsAPIClient + params *ListMultipartUploadsInput + firstPage bool + keyMarker *string + uploadIDMarker *string + isTruncated bool +} + +// NewListMultipartUploadsPaginator returns a new ListMultipartUploadsPaginator +func NewListMultipartUploadsPaginator(client ListMultipartUploadsAPIClient, params *ListMultipartUploadsInput, optFns ...func(*ListMultipartUploadsPaginatorOptions)) *ListMultipartUploadsPaginator { + if params == nil { + params = &ListMultipartUploadsInput{} + } + + options := ListMultipartUploadsPaginatorOptions{} + if params.MaxUploads != nil { + options.Limit = aws.ToInt32(params.MaxUploads) + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListMultipartUploadsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + keyMarker: params.KeyMarker, + uploadIDMarker: params.UploadIdMarker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListMultipartUploadsPaginator) HasMorePages() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListMultipartUploads page. +func (p *ListMultipartUploadsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.KeyMarker = p.keyMarker + params.UploadIdMarker = p.uploadIDMarker + + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + if limit > 0 { + params.MaxUploads = aws.Int32(limit) + } + + result, err := p.client.ListMultipartUploads(ctx, ¶ms, optFns...) 
+    if err != nil {
+        return nil, err
+    }
+    p.firstPage = false
+
+    prevToken := p.keyMarker
+    p.isTruncated = aws.ToBool(result.IsTruncated)
+    p.keyMarker = nil
+    p.uploadIDMarker = nil
+    if aws.ToBool(result.IsTruncated) {
+        p.keyMarker = result.NextKeyMarker
+        p.uploadIDMarker = result.NextUploadIdMarker
+    }
+
+    if p.options.StopOnDuplicateToken &&
+        prevToken != nil &&
+        p.keyMarker != nil &&
+        *prevToken == *p.keyMarker {
+        p.isTruncated = false
+    }
+
+    return result, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go
new file mode 100644
index 000000000..97b5771bb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go
@@ -0,0 +1,106 @@
+package arn
+
+import (
+    "fmt"
+    "strings"
+
+    awsarn "github.com/aws/aws-sdk-go-v2/aws/arn"
+    "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+)
+
+const (
+    s3Namespace              = "s3"
+    s3ObjectsLambdaNamespace = "s3-object-lambda"
+    s3OutpostsNamespace      = "s3-outposts"
+)
+
+// ParseEndpointARN parses a given generic aws ARN into a s3 arn resource.
+func ParseEndpointARN(v awsarn.ARN) (arn.Resource, error) {
+    return arn.ParseResource(v, accessPointResourceParser)
+}
+
+func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
+    resParts := arn.SplitResource(a.Resource)
+
+    switch resParts[0] {
+    case "accesspoint":
+        switch a.Service {
+        case s3Namespace:
+            return arn.ParseAccessPointResource(a, resParts[1:])
+        case s3ObjectsLambdaNamespace:
+            return parseS3ObjectLambdaAccessPointResource(a, resParts)
+        default:
+            return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)}
+        }
+    case "outpost":
+        if a.Service != s3OutpostsNamespace {
+            return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3OutpostsNamespace)}
+        }
+        return parseOutpostAccessPointResource(a, resParts[1:])
+    default:
+        return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
+    }
+}
+
+func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) {
+    // outpost accesspoint arn is only valid if service is s3-outposts
+    if a.Service != "s3-outposts" {
+        return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
+    }
+
+    if len(resParts) == 0 {
+        return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
+    }
+
+    if len(resParts) < 3 {
+        return arn.OutpostAccessPointARN{}, arn.InvalidARNError{
+            ARN: a, Reason: "access-point resource not set in Outpost ARN",
+        }
+    }
+
+    resID := strings.TrimSpace(resParts[0])
+    if len(resID) == 0 {
+        return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
+    }
+
+    var outpostAccessPointARN = arn.OutpostAccessPointARN{}
+    switch resParts[1] {
+    case "accesspoint":
+        // Do not allow region-less outpost access-point arns.
+        if len(a.Region) == 0 {
+            return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "region is not set"}
+        }
+
+        accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:])
+        if err != nil {
+            return arn.OutpostAccessPointARN{}, err
+        }
+        // set access-point arn
+        outpostAccessPointARN.AccessPointARN = accessPointARN
+    default:
+        return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"}
+    }
+
+    // set outpost id
+    outpostAccessPointARN.OutpostID = resID
+    return outpostAccessPointARN, nil
+}
+
+func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) {
+    if a.Service != s3ObjectsLambdaNamespace {
+        return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)}
+    }
+
+    if len(a.Region) == 0 {
+        return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)}
+    }
+
+    accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:])
+    if err != nil {
+        return arn.S3ObjectLambdaAccessPointARN{}, err
+    }
+
+    return arn.S3ObjectLambdaAccessPointARN{
+        AccessPointARN: accessPointARN,
+    }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go
new file mode 100644
index 000000000..91b8fde0d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go
@@ -0,0 +1,21 @@
+package customizations
+
+import (
+    "context"
+
+    "github.com/aws/smithy-go/middleware"
+)
+
+type bucketKey struct{}
+
+// SetBucket stores a bucket name within the request context, which is required
+// for a variety of custom S3 behaviors.
+func SetBucket(ctx context.Context, bucket string) context.Context {
+    return middleware.WithStackValue(ctx, bucketKey{}, bucket)
+}
+
+// GetBucket retrieves a stored bucket name within a context.
+func GetBucket(ctx context.Context) string {
+    v, _ := middleware.GetStackValue(ctx, bucketKey{}).(string)
+    return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go
new file mode 100644
index 000000000..e1d1cbefa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go
@@ -0,0 +1,79 @@
+/*
+Package customizations provides customizations for the Amazon S3 API client.
+
+This package provides support for the following S3 customizations:
+
+    ProcessARN Middleware: processes an ARN if provided as input and updates the endpoint as per the arn type
+
+    UpdateEndpoint Middleware: resolves a custom endpoint as per s3 config options
+
+    RemoveBucket Middleware: removes a serialized bucket name from the request url path
+
+    processResponseWith200Error Middleware: deserializes an error response delivered with a 200 status code
+
+# Virtual Host style url addressing
+
+Since serializers serialize by default as path style url, we use customization
+to modify the endpoint url when the `UsePathStyle` option on S3Client is unset
+or false. This flag will be ignored if the `UseAccelerate` option is set to
+true.
+
+If UseAccelerate is not enabled, and the bucket name is not a valid hostname
+label, the SDK will fall back to forcing the request to be made as if
+UsePathStyle was enabled. This behavior is also used if UseDualStackEndpoint
+is enabled.
+
+https://docs.aws.amazon.com/AmazonS3/latest/dev/dual-stack-endpoints.html#dual-stack-endpoints-description
+
+# Transfer acceleration
+
+By default, S3 transfer acceleration support is disabled. By enabling the
+`UseAccelerate` option on the S3 client, one can enable s3 transfer
+acceleration support. Transfer acceleration only works with Virtual Host style
+addressing, and thus the `UsePathStyle` option, if set, is ignored. Transfer
+acceleration is not supported for the S3 operations DeleteBucket, ListBuckets,
+and CreateBucket.
+
+# Dualstack support
+
+By default, dualstack support for the s3 client is disabled. By enabling the
+`UseDualstack` option on the s3 client, you can enable dualstack endpoint
+support.
+
+# Endpoint customizations
+
+Customizations that look up and process ARNs need to happen before request
+serialization. The UpdateEndpoint middleware, which mutates the resolved
+endpoint based on options such as UseDualstack and UseAccelerate, executes
+after request serialization. The remove-bucket middleware also executes after
+a request is serialized, and removes the serialized bucket name from the
+request path.
+
+    Middleware layering:
+
+    Initialize : HTTP Request -> ARN Lookup -> Input-Validation -> Serialize step
+
+    Serialize : HTTP Request -> Process ARN -> operation serializer -> Update-Endpoint customization -> Remove-Bucket -> next middleware
+
+Customization options:
+
+    UseARNRegion (Disabled by Default)
+
+    UsePathStyle (Disabled by Default)
+
+    UseAccelerate (Disabled by Default)
+
+    UseDualstack (Disabled by Default)
+
+# Handle Error response with 200 status code
+
+The S3 operations CopyObject, CompleteMultipartUpload, and UploadPartCopy can
+return an error response with a 2xx status code. The
+processResponseWith200Error middleware customization enables the SDK to check
+for an error within the response body prior to deserialization.
+
+Because the check for a 2xx response containing an error must run before
+response deserialization, and the deserialize step executes in reverse order
+relative to the other stack steps, it is easier to consider that "after"
+means "before".
+
+    Middleware layering:
+
+    HTTP Response -> handle 200 error customization -> deserialize
+*/
+package customizations
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go
new file mode 100644
index 000000000..8cc0b3624
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go
@@ -0,0 +1,44 @@
+package customizations
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/aws/aws-sdk-go-v2/aws"
+    internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+    "github.com/aws/smithy-go"
+    "github.com/aws/smithy-go/auth"
+)
+
+// S3ExpressCredentialsProvider retrieves credentials for the S3Express storage
+// class.
+type S3ExpressCredentialsProvider interface {
+    Retrieve(ctx context.Context, bucket string) (aws.Credentials, error)
+}
+
+// ExpressIdentityResolver retrieves identity for the S3Express storage class.
+type ExpressIdentityResolver struct {
+    Provider S3ExpressCredentialsProvider
+}
+
+var _ (auth.IdentityResolver) = (*ExpressIdentityResolver)(nil)
+
+// GetIdentity retrieves AWS credentials using the underlying provider.
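+//
+// The bucket is resolved from the identity properties when present, falling
+// back to the request context. A caller-side sketch (resolver construction
+// and bucket name are illustrative):
+//
+//    props := smithy.Properties{}
+//    SetIdentityPropertiesBucket(&props, "my-bucket--usw2-az1--x-s3")
+//    identity, err := resolver.GetIdentity(ctx, props)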
+func (v *ExpressIdentityResolver) GetIdentity(ctx context.Context, props smithy.Properties) ( + auth.Identity, error, +) { + bucket, ok := GetIdentityPropertiesBucket(&props) + if !ok { + bucket = GetBucket(ctx) + } + if bucket == "" { + return nil, fmt.Errorf("bucket name is missing") + } + + creds, err := v.Provider.Retrieve(ctx, bucket) + if err != nil { + return nil, fmt.Errorf("get credentials: %v", err) + } + + return &internalauthsmithy.CredentialsAdapter{Credentials: creds}, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go new file mode 100644 index 000000000..bb22d3474 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go @@ -0,0 +1,18 @@ +package customizations + +type s3DisableExpressAuthProvider interface { + GetS3DisableExpressAuth() (bool, bool) +} + +// ResolveDisableExpressAuth pulls S3DisableExpressAuth setting from config +// sources. +func ResolveDisableExpressAuth(configs []interface{}) (value bool, exists bool) { + for _, cfg := range configs { + if p, ok := cfg.(s3DisableExpressAuthProvider); ok { + if value, exists = p.GetS3DisableExpressAuth(); exists { + break + } + } + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go new file mode 100644 index 000000000..cf3ff5966 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go @@ -0,0 +1,42 @@ +package customizations + +import ( + "context" + "fmt" + + ictx "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + "github.com/aws/smithy-go/middleware" +) + +type expressDefaultChecksumMiddleware struct{} + +func (*expressDefaultChecksumMiddleware) ID() string { + return "expressDefaultChecksum" +} + +func (*expressDefaultChecksumMiddleware) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if ictx.GetS3Backend(ctx) == ictx.S3BackendS3Express && ictx.GetChecksumInputAlgorithm(ctx) == "" { + ctx = ictx.SetChecksumInputAlgorithm(ctx, string(checksum.AlgorithmCRC32)) + } + return next.HandleFinalize(ctx, in) +} + +// AddExpressDefaultChecksumMiddleware appends a step to default to CRC32 for +// S3Express requests. This should only be applied to operations where a +// checksum is required (e.g. DeleteObject). 
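+//
+// A minimal wiring sketch; the insert fails unless the
+// AWSChecksum:ComputeInputPayloadChecksum finalizer is already on the stack,
+// as it is for generated operations that require a checksum:
+//
+//    if err := AddExpressDefaultChecksumMiddleware(stack); err != nil {
+//        return err
+//    }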
+func AddExpressDefaultChecksumMiddleware(s *middleware.Stack) error { + err := s.Finalize.Insert( + &expressDefaultChecksumMiddleware{}, + "AWSChecksum:ComputeInputPayloadChecksum", + middleware.Before, + ) + if err != nil { + return fmt.Errorf("add expressDefaultChecksum: %v", err) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go new file mode 100644 index 000000000..171de4613 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go @@ -0,0 +1,21 @@ +package customizations + +import "github.com/aws/smithy-go" + +// GetPropertiesBackend returns a resolved endpoint backend from the property +// set. +func GetPropertiesBackend(p *smithy.Properties) string { + v, _ := p.Get("backend").(string) + return v +} + +// GetIdentityPropertiesBucket returns the S3 bucket from identity properties. +func GetIdentityPropertiesBucket(ip *smithy.Properties) (string, bool) { + v, ok := ip.Get(bucketKey{}).(string) + return v, ok +} + +// SetIdentityPropertiesBucket sets the S3 bucket to identity properties. +func SetIdentityPropertiesBucket(ip *smithy.Properties, bucket string) { + ip.Set(bucketKey{}, bucket) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go new file mode 100644 index 000000000..545e5b220 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go @@ -0,0 +1,109 @@ +package customizations + +import ( + "context" + "net/http" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" +) + +const ( + s3ExpressSignerVersion = "com.amazonaws.s3#sigv4express" + headerAmzSessionToken = "x-amz-s3session-token" +) + +// adapts a v4 signer for S3Express +type s3ExpressSignerAdapter struct { + v4 v4.HTTPSigner +} + +// SignHTTP performs S3Express signing on a request, which is identical to +// SigV4 signing save for an additional header containing the S3Express +// session token. +func (s *s3ExpressSignerAdapter) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error { + r.Header.Set(headerAmzSessionToken, credentials.SessionToken) + optFns = append(optFns, func(o *v4.SignerOptions) { + o.DisableSessionToken = true + }) + return s.v4.SignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...) +} + +// adapts S3ExpressCredentialsProvider to the standard AWS +// CredentialsProvider interface +type s3ExpressCredentialsAdapter struct { + provider S3ExpressCredentialsProvider + bucket string +} + +func (c *s3ExpressCredentialsAdapter) Retrieve(ctx context.Context) (aws.Credentials, error) { + return c.provider.Retrieve(ctx, c.bucket) +} + +// S3ExpressSignHTTPRequestMiddleware signs S3 S3Express requests. +// +// This is NOT mutually exclusive with existing v4 or v4a signer handling on +// the stack itself, but only one handler will actually perform signing based +// on the provided signing version in the context. 
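+//
+// Selection sketch: this handler signs only when the stack context carries
+// the S3Express signer version, e.g.:
+//
+//    ctx = SetSignerVersion(ctx, "com.amazonaws.s3#sigv4express")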
+type S3ExpressSignHTTPRequestMiddleware struct { + Credentials S3ExpressCredentialsProvider + Signer v4.HTTPSigner + LogSigning bool +} + +// ID identifies S3ExpressSignHTTPRequestMiddleware. +func (*S3ExpressSignHTTPRequestMiddleware) ID() string { + return "S3ExpressSigning" +} + +// HandleFinalize will sign the request if the S3Express signer has been +// selected. +func (m *S3ExpressSignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if GetSignerVersion(ctx) != s3ExpressSignerVersion { + return next.HandleFinalize(ctx, in) + } + + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: m.credentialsAdapter(ctx), + Signer: m.signerAdapter(), + LogSigning: m.LogSigning, + }) + return mw.HandleFinalize(ctx, in, next) +} + +func (m *S3ExpressSignHTTPRequestMiddleware) credentialsAdapter(ctx context.Context) aws.CredentialsProvider { + return &s3ExpressCredentialsAdapter{ + provider: m.Credentials, + bucket: GetBucket(ctx), + } +} + +func (m *S3ExpressSignHTTPRequestMiddleware) signerAdapter() v4.HTTPSigner { + return &s3ExpressSignerAdapter{v4: m.Signer} +} + +type s3ExpressPresignerAdapter struct { + v4 v4.HTTPPresigner +} + +// SignHTTP performs S3Express signing on a request, which is identical to +// SigV4 signing save for an additional header containing the S3Express +// session token. +func (s *s3ExpressPresignerAdapter) PresignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) ( + string, http.Header, error, +) { + r.Header.Set(headerAmzSessionToken, credentials.SessionToken) + optFns = append(optFns, func(o *v4.SignerOptions) { + o.DisableSessionToken = true + }) + return s.v4.PresignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...) +} + +var ( + _ aws.CredentialsProvider = &s3ExpressCredentialsAdapter{} + _ v4.HTTPSigner = &s3ExpressSignerAdapter{} +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go new file mode 100644 index 000000000..e3ec7f011 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go @@ -0,0 +1,61 @@ +package customizations + +import ( + "context" + "fmt" + + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// ExpressSigner signs requests for the sigv4-s3express auth scheme. +// +// This signer respects the aws.auth#sigv4 properties for signing name and +// region. +type ExpressSigner struct { + Signer v4.HTTPSigner + Logger logging.Logger + LogSigning bool +} + +var _ (smithyhttp.Signer) = (*ExpressSigner)(nil) + +// SignRequest signs the request with the provided identity. 
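+//
+// The properties are expected to carry the sigv4 signing name and region; a
+// sketch of what the auth resolution step provides (values illustrative):
+//
+//    props := smithy.Properties{}
+//    smithyhttp.SetSigV4SigningName(&props, "s3express")
+//    smithyhttp.SetSigV4SigningRegion(&props, "us-west-2")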
+func (v *ExpressSigner) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { + ca, ok := identity.(*internalauthsmithy.CredentialsAdapter) + if !ok { + return fmt.Errorf("unexpected identity type: %T", identity) + } + + name, ok := smithyhttp.GetSigV4SigningName(&props) + if !ok { + return fmt.Errorf("sigv4 signing name is required for s3express variant") + } + + region, ok := smithyhttp.GetSigV4SigningRegion(&props) + if !ok { + return fmt.Errorf("sigv4 signing region is required for s3express variant") + } + + hash := v4.GetPayloadHash(ctx) + + r.Header.Set(headerAmzSessionToken, ca.Credentials.SessionToken) + err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, sdk.NowTime(), func(o *v4.SignerOptions) { + o.DisableSessionToken = true + + o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) + + o.Logger = v.Logger + o.LogSigning = v.LogSigning + }) + if err != nil { + return fmt.Errorf("sign http: %v", err) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go new file mode 100644 index 000000000..2b11b1fa2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go @@ -0,0 +1,74 @@ +package customizations + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "strings" + + "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// HandleResponseErrorWith200Status check for S3 200 error response. +// If an s3 200 error is found, status code for the response is modified temporarily to +// 5xx response status code. +func HandleResponseErrorWith200Status(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&processResponseFor200ErrorMiddleware{}, "OperationDeserializer", middleware.After) +} + +// middleware to process raw response and look for error response with 200 status code +type processResponseFor200ErrorMiddleware struct{} + +// ID returns the middleware ID. +func (*processResponseFor200ErrorMiddleware) ID() string { + return "S3:ProcessResponseFor200Error" +} + +func (m *processResponseFor200ErrorMiddleware) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + // check if response status code is 2xx. + if response.StatusCode < 200 || response.StatusCode >= 300 { + return + } + + var readBuff bytes.Buffer + body := io.TeeReader(response.Body, &readBuff) + + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("received empty response payload"), + } + } + + // rewind response body + response.Body = ioutil.NopCloser(io.MultiReader(&readBuff, response.Body)) + + // if start tag is "Error", the response is consider error response. 
+    if strings.EqualFold(t.Name.Local, "Error") {
+        // according to https://aws.amazon.com/premiumsupport/knowledge-center/s3-resolve-200-internalerror/
+        // 200 error responses are similar to 5xx errors.
+        response.StatusCode = 500
+    }
+
+    return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go
new file mode 100644
index 000000000..87f7a2232
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go
@@ -0,0 +1,22 @@
+package customizations
+
+import (
+    "github.com/aws/smithy-go/transport/http"
+    "strings"
+)
+
+func updateS3HostForS3AccessPoint(req *http.Request) {
+    updateHostPrefix(req, "s3", s3AccessPoint)
+}
+
+func updateS3HostForS3ObjectLambda(req *http.Request) {
+    updateHostPrefix(req, "s3", s3ObjectLambda)
+}
+
+func updateHostPrefix(req *http.Request, oldEndpointPrefix, newEndpointPrefix string) {
+    host := req.URL.Host
+    if strings.HasPrefix(host, oldEndpointPrefix) {
+        // e.g. if oldEndpointPrefix is "s3", the leading "s3" of the host is
+        // replaced with newEndpointPrefix
+        req.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):]
+    }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go
new file mode 100644
index 000000000..f4bbb4b6d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go
@@ -0,0 +1,49 @@
+package customizations
+
+import (
+    "context"
+    "fmt"
+    "strconv"
+    "time"
+
+    "github.com/aws/smithy-go/middleware"
+    smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddExpiresOnPresignedURL represents a build middleware used to assign
+// expiration on a presigned URL.
+type AddExpiresOnPresignedURL struct {
+
+    // Expires is the duration for which the presigned URL remains valid. It
+    // is serialized on the URL in seconds; if unset, it defaults to 15
+    // minutes (900 seconds).
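+    //
+    // For example, Expires = 2 * time.Hour serializes as X-Amz-Expires=7200.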
+    Expires time.Duration
+}
+
+// ID returns the middleware identifier.
+func (*AddExpiresOnPresignedURL) ID() string {
+    return "S3:AddExpiresOnPresignedURL"
+}
+
+// HandleBuild handles the build step middleware behavior
+func (m *AddExpiresOnPresignedURL) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+    out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+    // if expiration is unset, apply the default
+    if m.Expires == 0 {
+        // default to 15 minutes (900 seconds)
+        m.Expires = 15 * time.Minute
+    }
+
+    req, ok := in.Request.(*smithyhttp.Request)
+    if !ok {
+        return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+    }
+
+    // set the X-Amz-Expires query parameter on the signed URL
+    query := req.URL.Query()
+    query.Set("X-Amz-Expires", strconv.FormatInt(int64(m.Expires/time.Second), 10))
+    req.URL.RawQuery = query.Encode()
+
+    return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go
new file mode 100644
index 000000000..bbc971f2a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go
@@ -0,0 +1,568 @@
+package customizations
+
+import (
+    "context"
+    "fmt"
+    "net/url"
+    "strings"
+
+    "github.com/aws/smithy-go/middleware"
+    "github.com/aws/smithy-go/transport/http"
+
+    "github.com/aws/aws-sdk-go-v2/aws"
+    awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+    "github.com/aws/aws-sdk-go-v2/internal/v4a"
+    "github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
+    "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+    s3arn "github.com/aws/aws-sdk-go-v2/service/s3/internal/arn"
+    "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints"
+)
+
+const (
+    s3AccessPoint  = "s3-accesspoint"
+    s3ObjectLambda = "s3-object-lambda"
+)
+
+// processARNResource is used to process an ARN resource.
+type processARNResource struct {
+
+    // UseARNRegion indicates if the region parsed from an ARN should be used.
+    UseARNRegion bool
+
+    // UseAccelerate indicates if s3 transfer acceleration is enabled.
+    UseAccelerate bool
+
+    // EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver.
+    EndpointResolver EndpointResolver
+
+    // EndpointResolverOptions used by the endpoint resolver.
+    EndpointResolverOptions EndpointResolverOptions
+
+    // DisableMultiRegionAccessPoints indicates multi-region access point support is disabled.
+    DisableMultiRegionAccessPoints bool
+}
+
+// ID returns the middleware ID.
+func (*processARNResource) ID() string { return "S3:ProcessARNResource" } + +func (m *processARNResource) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + // check if arn was provided, if not skip this middleware + arnValue, ok := s3shared.GetARNResourceFromContext(ctx) + if !ok { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*http.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + // parse arn into an endpoint arn wrt to service + resource, err := s3arn.ParseEndpointARN(arnValue) + if err != nil { + return out, metadata, err + } + + // build a resource request struct + resourceRequest := s3shared.ResourceRequest{ + Resource: resource, + UseARNRegion: m.UseARNRegion, + UseFIPS: m.EndpointResolverOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled, + RequestRegion: awsmiddleware.GetRegion(ctx), + SigningRegion: awsmiddleware.GetSigningRegion(ctx), + PartitionID: awsmiddleware.GetPartitionID(ctx), + } + + // switch to correct endpoint updater + switch tv := resource.(type) { + case arn.AccessPointARN: + // multi-region arns do not need to validate for cross partition request + if len(tv.Region) != 0 { + // validate resource request + if err := validateRegionForResourceRequest(resourceRequest); err != nil { + return out, metadata, err + } + } + + // Special handling for region-less ap-arns. + if len(tv.Region) == 0 { + // check if multi-region arn support is disabled + if m.DisableMultiRegionAccessPoints { + return out, metadata, fmt.Errorf("Invalid configuration, Multi-Region access point ARNs are disabled") + } + + // Do not allow dual-stack configuration with multi-region arns. + if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { + return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + } + + // check if accelerate + if m.UseAccelerate { + return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // fetch arn region to resolve request + resolveRegion := tv.Region + // check if request region is FIPS + if resourceRequest.UseFIPS && len(resolveRegion) == 0 { + // Do not allow Fips support within multi-region arns. 
+ return out, metadata, s3shared.NewClientConfiguredForFIPSError( + tv, resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + var requestBuilder func(context.Context, accesspointOptions) (context.Context, error) + if len(resolveRegion) == 0 { + requestBuilder = buildMultiRegionAccessPointsRequest + } else { + requestBuilder = buildAccessPointRequest + } + + // build request as per accesspoint builder + ctx, err = requestBuilder(ctx, accesspointOptions{ + processARNResource: *m, + request: req, + resource: tv, + resolveRegion: resolveRegion, + partitionID: resourceRequest.PartitionID, + requestRegion: resourceRequest.RequestRegion, + }) + if err != nil { + return out, metadata, err + } + + case arn.S3ObjectLambdaAccessPointARN: + // validate region for resource request + if err := validateRegionForResourceRequest(resourceRequest); err != nil { + return out, metadata, err + } + + // check if accelerate + if m.UseAccelerate { + return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // check if dualstack + if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { + return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // fetch arn region to resolve request + resolveRegion := tv.Region + + // build access point request + ctx, err = buildS3ObjectLambdaAccessPointRequest(ctx, accesspointOptions{ + processARNResource: *m, + request: req, + resource: tv.AccessPointARN, + resolveRegion: resolveRegion, + partitionID: resourceRequest.PartitionID, + requestRegion: resourceRequest.RequestRegion, + }) + if err != nil { + return out, metadata, err + } + + // process outpost accesspoint ARN + case arn.OutpostAccessPointARN: + // validate region for resource request + if err := validateRegionForResourceRequest(resourceRequest); err != nil { + return out, metadata, err + } + + // check if accelerate + if m.UseAccelerate { + return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // check if dual stack + if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { + return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // check if request region is FIPS + if resourceRequest.UseFIPS { + return out, metadata, s3shared.NewFIPSConfigurationError(tv, resourceRequest.PartitionID, + resourceRequest.RequestRegion, nil) + } + + // build outpost access point request + ctx, err = buildOutpostAccessPointRequest(ctx, outpostAccessPointOptions{ + processARNResource: *m, + resource: tv, + request: req, + partitionID: resourceRequest.PartitionID, + requestRegion: resourceRequest.RequestRegion, + }) + if err != nil { + return out, metadata, err + } + + default: + return out, metadata, s3shared.NewInvalidARNError(resource, nil) + } + + return next.HandleSerialize(ctx, in) +} + +// validate if s3 resource and request region config is compatible. 
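+// For example, an ARN for us-east-1 used from a client configured for
+// us-west-2 is rejected unless UseARNRegion is enabled, while an ARN from the
+// aws-cn partition used from a client in the aws partition is always
+// rejected.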
+func validateRegionForResourceRequest(resourceRequest s3shared.ResourceRequest) error { + // check if resourceRequest leads to a cross partition error + v, err := resourceRequest.IsCrossPartition() + if err != nil { + return err + } + if v { + // if cross partition + return s3shared.NewClientPartitionMismatchError(resourceRequest.Resource, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // check if resourceRequest leads to a cross region error + if !resourceRequest.AllowCrossRegion() && resourceRequest.IsCrossRegion() { + // if cross region, but not use ARN region is not enabled + return s3shared.NewClientRegionMismatchError(resourceRequest.Resource, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + return nil +} + +// === Accesspoint ========== + +type accesspointOptions struct { + processARNResource + request *http.Request + resource arn.AccessPointARN + resolveRegion string + partitionID string + requestRegion string +} + +func buildAccessPointRequest(ctx context.Context, options accesspointOptions) (context.Context, error) { + tv := options.resource + req := options.request + resolveRegion := options.resolveRegion + + resolveService := tv.Service + + ero := options.EndpointResolverOptions + ero.Logger = middleware.GetLogger(ctx) + ero.ResolvedRegion = "" // clear endpoint option's resolved region so that we resolve using the passed in region + + // resolve endpoint + endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero) + if err != nil { + return ctx, s3shared.NewFailedToResolveEndpointError( + tv, + options.partitionID, + options.requestRegion, + err, + ) + } + + // assign resolved endpoint url to request url + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom { + ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) + } else { + // Must sign with s3-object-lambda + ctx = awsmiddleware.SetSigningName(ctx, resolveService) + } + + if len(endpoint.SigningRegion) != 0 { + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + } else { + ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion) + } + + // update serviceID to "s3-accesspoint" + ctx = awsmiddleware.SetServiceID(ctx, s3AccessPoint) + + // disable host prefix behavior + ctx = http.DisableEndpointHostPrefix(ctx, true) + + // remove the serialized arn in place of /{Bucket} + ctx = setBucketToRemoveOnContext(ctx, tv.String()) + + // skip arn processing, if arn region resolves to a immutable endpoint + if endpoint.HostnameImmutable { + return ctx, nil + } + + updateS3HostForS3AccessPoint(req) + + ctx, err = buildAccessPointHostPrefix(ctx, req, tv) + if err != nil { + return ctx, err + } + + return ctx, nil +} + +func buildS3ObjectLambdaAccessPointRequest(ctx context.Context, options accesspointOptions) (context.Context, error) { + tv := options.resource + req := options.request + resolveRegion := options.resolveRegion + + resolveService := tv.Service + + ero := options.EndpointResolverOptions + ero.Logger = middleware.GetLogger(ctx) + ero.ResolvedRegion = "" // clear endpoint options resolved region so we resolve the passed in region + + // resolve endpoint + endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero) + if err != nil { + return ctx, s3shared.NewFailedToResolveEndpointError( + tv, + options.partitionID, + options.requestRegion, + err, + ) + } 
+ + // assign resolved endpoint url to request url + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom { + ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) + } else { + // Must sign with s3-object-lambda + ctx = awsmiddleware.SetSigningName(ctx, resolveService) + } + + if len(endpoint.SigningRegion) != 0 { + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + } else { + ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion) + } + + // update serviceID to "s3-object-lambda" + ctx = awsmiddleware.SetServiceID(ctx, s3ObjectLambda) + + // disable host prefix behavior + ctx = http.DisableEndpointHostPrefix(ctx, true) + + // remove the serialized arn in place of /{Bucket} + ctx = setBucketToRemoveOnContext(ctx, tv.String()) + + // skip arn processing, if arn region resolves to a immutable endpoint + if endpoint.HostnameImmutable { + return ctx, nil + } + + if endpoint.Source == aws.EndpointSourceServiceMetadata { + updateS3HostForS3ObjectLambda(req) + } + + ctx, err = buildAccessPointHostPrefix(ctx, req, tv) + if err != nil { + return ctx, err + } + + return ctx, nil +} + +func buildMultiRegionAccessPointsRequest(ctx context.Context, options accesspointOptions) (context.Context, error) { + const s3GlobalLabel = "s3-global." + const accesspointLabel = "accesspoint." + + tv := options.resource + req := options.request + resolveService := tv.Service + resolveRegion := options.requestRegion + arnPartition := tv.Partition + + // resolve endpoint + ero := options.EndpointResolverOptions + ero.Logger = middleware.GetLogger(ctx) + + endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero) + if err != nil { + return ctx, s3shared.NewFailedToResolveEndpointError( + tv, + options.partitionID, + options.requestRegion, + err, + ) + } + + // set signing region and version for MRAP + endpoint.SigningRegion = "*" + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = SetSignerVersion(ctx, v4a.Version) + + if len(endpoint.SigningName) != 0 { + ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) + } else { + ctx = awsmiddleware.SetSigningName(ctx, resolveService) + } + + // skip arn processing, if arn region resolves to a immutable endpoint + if endpoint.HostnameImmutable { + return ctx, nil + } + + // modify endpoint host to use s3-global host prefix + scheme := strings.SplitN(endpoint.URL, "://", 2) + dnsSuffix, err := endpoints.GetDNSSuffix(arnPartition, ero) + if err != nil { + return ctx, fmt.Errorf("Error determining dns suffix from arn partition, %w", err) + } + // set url as per partition + endpoint.URL = scheme[0] + "://" + s3GlobalLabel + dnsSuffix + + // assign resolved endpoint url to request url + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + // build access point host prefix + accessPointHostPrefix := tv.AccessPointName + "." 
+ accesspointLabel + + // add host prefix to url + req.URL.Host = accessPointHostPrefix + req.URL.Host + if len(req.Host) > 0 { + req.Host = accessPointHostPrefix + req.Host + } + + // validate the endpoint host + if err := http.ValidateEndpointHost(req.URL.Host); err != nil { + return ctx, fmt.Errorf("endpoint validation error: %w, when using arn %v", err, tv) + } + + // disable host prefix behavior + ctx = http.DisableEndpointHostPrefix(ctx, true) + + // remove the serialized arn in place of /{Bucket} + ctx = setBucketToRemoveOnContext(ctx, tv.String()) + + return ctx, nil +} + +func buildAccessPointHostPrefix(ctx context.Context, req *http.Request, tv arn.AccessPointARN) (context.Context, error) { + // add host prefix for access point + accessPointHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "." + req.URL.Host = accessPointHostPrefix + req.URL.Host + if len(req.Host) > 0 { + req.Host = accessPointHostPrefix + req.Host + } + + // validate the endpoint host + if err := http.ValidateEndpointHost(req.URL.Host); err != nil { + return ctx, s3shared.NewInvalidARNError(tv, err) + } + + return ctx, nil +} + +// ====== Outpost Accesspoint ======== + +type outpostAccessPointOptions struct { + processARNResource + request *http.Request + resource arn.OutpostAccessPointARN + partitionID string + requestRegion string +} + +func buildOutpostAccessPointRequest(ctx context.Context, options outpostAccessPointOptions) (context.Context, error) { + tv := options.resource + req := options.request + + resolveRegion := tv.Region + resolveService := tv.Service + endpointsID := resolveService + if strings.EqualFold(resolveService, "s3-outposts") { + // assign endpoints ID as "S3" + endpointsID = "s3" + } + + ero := options.EndpointResolverOptions + ero.Logger = middleware.GetLogger(ctx) + ero.ResolvedRegion = "" + + // resolve regional endpoint for resolved region. + endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero) + if err != nil { + return ctx, s3shared.NewFailedToResolveEndpointError( + tv, + options.partitionID, + options.requestRegion, + err, + ) + } + + // assign resolved endpoint url to request url + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + // assign resolved service from arn as signing name + if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom { + ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) + } else { + ctx = awsmiddleware.SetSigningName(ctx, resolveService) + } + + if len(endpoint.SigningRegion) != 0 { + // redirect signer to use resolved endpoint signing name and region + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + } else { + ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion) + } + + // update serviceID to resolved service id + ctx = awsmiddleware.SetServiceID(ctx, resolveService) + + // disable host prefix behavior + ctx = http.DisableEndpointHostPrefix(ctx, true) + + // remove the serialized arn in place of /{Bucket} + ctx = setBucketToRemoveOnContext(ctx, tv.String()) + + // skip further customizations, if arn region resolves to a immutable endpoint + if endpoint.HostnameImmutable { + return ctx, nil + } + + updateHostPrefix(req, endpointsID, resolveService) + + // add host prefix for s3-outposts + outpostAPHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "." + tv.OutpostID + "." 
+ req.URL.Host = outpostAPHostPrefix + req.URL.Host + if len(req.Host) > 0 { + req.Host = outpostAPHostPrefix + req.Host + } + + // validate the endpoint host + if err := http.ValidateEndpointHost(req.URL.Host); err != nil { + return ctx, s3shared.NewInvalidARNError(tv, err) + } + + return ctx, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go new file mode 100644 index 000000000..cf3f4dc8b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go @@ -0,0 +1,63 @@ +package customizations + +import ( + "context" + "fmt" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/transport/http" +) + +// removeBucketFromPathMiddleware needs to be executed after serialize step is performed +type removeBucketFromPathMiddleware struct { +} + +func (m *removeBucketFromPathMiddleware) ID() string { + return "S3:RemoveBucketFromPathMiddleware" +} + +func (m *removeBucketFromPathMiddleware) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + // check if a bucket removal from HTTP path is required + bucket, ok := getRemoveBucketFromPath(ctx) + if !ok { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*http.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + removeBucketFromPath(req.URL, bucket) + return next.HandleSerialize(ctx, in) +} + +type removeBucketKey struct { + bucket string +} + +// setBucketToRemoveOnContext sets the bucket name to be removed. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func setBucketToRemoveOnContext(ctx context.Context, bucket string) context.Context { + return middleware.WithStackValue(ctx, removeBucketKey{}, bucket) +} + +// getRemoveBucketFromPath returns the bucket name to remove from the path. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. 
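+//
+// Pairs with setBucketToRemoveOnContext, e.g. in the ARN middleware:
+//
+//    ctx = setBucketToRemoveOnContext(ctx, tv.String())
+//    // ... later, in removeBucketFromPathMiddleware:
+//    bucket, ok := getRemoveBucketFromPath(ctx)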
+func getRemoveBucketFromPath(ctx context.Context) (string, bool) { + v, ok := middleware.GetStackValue(ctx, removeBucketKey{}).(string) + return v, ok +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go new file mode 100644 index 000000000..6e1d44724 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go @@ -0,0 +1,88 @@ +package customizations + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/transport/http" + "net/url" +) + +type s3ObjectLambdaEndpoint struct { + // whether the operation should use the s3-object-lambda endpoint + UseEndpoint bool + + // use transfer acceleration + UseAccelerate bool + + EndpointResolver EndpointResolver + EndpointResolverOptions EndpointResolverOptions +} + +func (t *s3ObjectLambdaEndpoint) ID() string { + return "S3:ObjectLambdaEndpoint" +} + +func (t *s3ObjectLambdaEndpoint) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + if !t.UseEndpoint { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*http.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request) + } + + if t.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { + return out, metadata, fmt.Errorf("client configured for dualstack but not supported for operation") + } + + if t.UseAccelerate { + return out, metadata, fmt.Errorf("client configured for accelerate but not supported for operation") + } + + region := awsmiddleware.GetRegion(ctx) + + ero := t.EndpointResolverOptions + + endpoint, err := t.EndpointResolver.ResolveEndpoint(region, ero) + if err != nil { + return out, metadata, err + } + + // Set the ServiceID and SigningName + ctx = awsmiddleware.SetServiceID(ctx, s3ObjectLambda) + + if len(endpoint.SigningName) > 0 && endpoint.Source == aws.EndpointSourceCustom { + ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) + } else { + ctx = awsmiddleware.SetSigningName(ctx, s3ObjectLambda) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, err + } + + if len(endpoint.SigningRegion) > 0 { + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + } else { + ctx = awsmiddleware.SetSigningRegion(ctx, region) + } + + if endpoint.Source == aws.EndpointSourceServiceMetadata || !endpoint.HostnameImmutable { + updateS3HostForS3ObjectLambda(req) + } + + return next.HandleSerialize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go new file mode 100644 index 000000000..756823cb7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go @@ -0,0 +1,227 @@ +package customizations + +import ( + "context" + "fmt" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/internal/v4a" + 
"github.com/aws/smithy-go/middleware" +) + +type signerVersionKey struct{} + +// GetSignerVersion retrieves the signer version to use for signing +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetSignerVersion(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, signerVersionKey{}).(string) + return v +} + +// SetSignerVersion sets the signer version to be used for signing the request +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func SetSignerVersion(ctx context.Context, version string) context.Context { + return middleware.WithStackValue(ctx, signerVersionKey{}, version) +} + +// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware. +type SignHTTPRequestMiddlewareOptions struct { + + // credential provider + CredentialsProvider aws.CredentialsProvider + + // log signing + LogSigning bool + + // v4 signer + V4Signer v4.HTTPSigner + + //v4a signer + V4aSigner v4a.HTTPSigner +} + +// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given Signer for signing requests +func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { + return &SignHTTPRequestMiddleware{ + credentialsProvider: options.CredentialsProvider, + v4Signer: options.V4Signer, + v4aSigner: options.V4aSigner, + logSigning: options.LogSigning, + } +} + +// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation to select HTTP Signing method +type SignHTTPRequestMiddleware struct { + + // credential provider + credentialsProvider aws.CredentialsProvider + + // log signing + logSigning bool + + // v4 signer + v4Signer v4.HTTPSigner + + //v4a signer + v4aSigner v4a.HTTPSigner +} + +// ID is the SignHTTPRequestMiddleware identifier +func (s *SignHTTPRequestMiddleware) ID() string { + return "Signing" +} + +// HandleFinalize will take the provided input and handle signing for either +// SigV4 or SigV4A as called for. +func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + sv := GetSignerVersion(ctx) + + if strings.EqualFold(sv, v4.Version) { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: s.credentialsProvider, + Signer: s.v4Signer, + LogSigning: s.logSigning, + }) + return mw.HandleFinalize(ctx, in, next) + } else if strings.EqualFold(sv, v4a.Version) { + v4aCredentialProvider, ok := s.credentialsProvider.(v4a.CredentialsProvider) + if !ok { + return out, metadata, fmt.Errorf("invalid credential-provider provided for sigV4a Signer") + } + + mw := v4a.NewSignHTTPRequestMiddleware(v4a.SignHTTPRequestMiddlewareOptions{ + Credentials: v4aCredentialProvider, + Signer: s.v4aSigner, + LogSigning: s.logSigning, + }) + return mw.HandleFinalize(ctx, in, next) + } + + return next.HandleFinalize(ctx, in) +} + +// RegisterSigningMiddleware registers the wrapper signing middleware to the stack. If a signing middleware is already +// present, this provided middleware will be swapped. Otherwise the middleware will be added at the tail of the +// finalize step. 
+func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignHTTPRequestMiddleware) (err error) { + const signedID = "Signing" + _, present := stack.Finalize.Get(signedID) + if present { + _, err = stack.Finalize.Swap(signedID, signingMiddleware) + } else { + err = stack.Finalize.Add(signingMiddleware, middleware.After) + } + return err +} + +// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware. +type PresignHTTPRequestMiddlewareOptions struct { + CredentialsProvider aws.CredentialsProvider + ExpressCredentials S3ExpressCredentialsProvider + V4Presigner v4.HTTPPresigner + V4aPresigner v4a.HTTPPresigner + LogSigning bool +} + +// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a +// presigned URL for an HTTP request. +// +// Will short circuit the middleware stack and not forward onto the next +// Finalize handler. +type PresignHTTPRequestMiddleware struct { + + // cred provider and signer for sigv4 + credentialsProvider aws.CredentialsProvider + + // s3Express credentials + expressCredentials S3ExpressCredentialsProvider + + // sigV4 signer + v4Signer v4.HTTPPresigner + + // sigV4a signer + v4aSigner v4a.HTTPPresigner + + // log signing + logSigning bool +} + +// NewPresignHTTPRequestMiddleware constructs a PresignHTTPRequestMiddleware using the given Signer for signing requests +func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware { + return &PresignHTTPRequestMiddleware{ + credentialsProvider: options.CredentialsProvider, + expressCredentials: options.ExpressCredentials, + v4Signer: options.V4Presigner, + v4aSigner: options.V4aPresigner, + logSigning: options.LogSigning, + } +} + +// ID provides the middleware ID. +func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" } + +// HandleFinalize will take the provided input and create a presigned url for +// the http request using the SigV4 or SigV4a presign authentication scheme. 
+//
+// Since the signed request is not a valid HTTP request, the middleware short
+// circuits the stack here and returns the presigned request to the caller
+// rather than forwarding it to the next Finalize handler.
+func (p *PresignHTTPRequestMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	// fetch signer type from context
+	signerVersion := GetSignerVersion(ctx)
+
+	switch signerVersion {
+	case "aws.auth#sigv4a":
+		mw := v4a.NewPresignHTTPRequestMiddleware(v4a.PresignHTTPRequestMiddlewareOptions{
+			CredentialsProvider: &v4a.SymmetricCredentialAdaptor{
+				SymmetricProvider: p.credentialsProvider,
+			},
+			Presigner:  p.v4aSigner,
+			LogSigning: p.logSigning,
+		})
+		return mw.HandleFinalize(ctx, in, next)
+	case "aws.auth#sigv4":
+		mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{
+			CredentialsProvider: p.credentialsProvider,
+			Presigner:           p.v4Signer,
+			LogSigning:          p.logSigning,
+		})
+		return mw.HandleFinalize(ctx, in, next)
+	case s3ExpressSignerVersion:
+		mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{
+			CredentialsProvider: &s3ExpressCredentialsAdapter{
+				provider: p.expressCredentials,
+				bucket:   GetBucket(ctx),
+			},
+			Presigner:  &s3ExpressPresignerAdapter{v4: p.v4Signer},
+			LogSigning: p.logSigning,
+		})
+		return mw.HandleFinalize(ctx, in, next)
+	default:
+		return out, metadata, fmt.Errorf("unsupported signer type \"%s\"", signerVersion)
+	}
+}
+
+// RegisterPreSigningMiddleware registers the wrapper pre-signing middleware to the stack. If a pre-signing middleware is already
+// present, this provided middleware will be swapped. Otherwise the middleware will be added at the tail of the
+// finalize step.
+func RegisterPreSigningMiddleware(stack *middleware.Stack, signingMiddleware *PresignHTTPRequestMiddleware) (err error) {
+	const signedID = "PresignHTTPRequest"
+	_, present := stack.Finalize.Get(signedID)
+	if present {
+		_, err = stack.Finalize.Swap(signedID, signingMiddleware)
+	} else {
+		err = stack.Finalize.Add(signingMiddleware, middleware.After)
+	}
+	return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go
new file mode 100644
index 000000000..eedfc7eef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go
@@ -0,0 +1,310 @@
+package customizations
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
+	internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints"
+	"github.com/aws/smithy-go/encoding/httpbinding"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+	ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// UpdateEndpointParameterAccessor represents accessor functions used by the middleware
+type UpdateEndpointParameterAccessor struct {
+	// function pointer to fetch the bucket name from the provided input.
+ // The function is intended to take an input value, and + // return a string pointer to value of string, and bool if + // input has no bucket member. + GetBucketFromInput func(interface{}) (*string, bool) +} + +// UpdateEndpointOptions provides the options for the UpdateEndpoint middleware setup. +type UpdateEndpointOptions struct { + // Accessor are parameter accessors used by the middleware + Accessor UpdateEndpointParameterAccessor + + // use path style + UsePathStyle bool + + // use transfer acceleration + UseAccelerate bool + + // indicates if an operation supports s3 transfer acceleration. + SupportsAccelerate bool + + // use ARN region + UseARNRegion bool + + // Indicates that the operation should target the s3-object-lambda endpoint. + // Used to direct operations that do not route based on an input ARN. + TargetS3ObjectLambda bool + + // EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver + EndpointResolver EndpointResolver + + // EndpointResolverOptions used by endpoint resolver + EndpointResolverOptions EndpointResolverOptions + + // DisableMultiRegionAccessPoints indicates multi-region access point support is disabled + DisableMultiRegionAccessPoints bool +} + +// UpdateEndpoint adds the middleware to the middleware stack based on the UpdateEndpointOptions. +func UpdateEndpoint(stack *middleware.Stack, options UpdateEndpointOptions) (err error) { + const serializerID = "OperationSerializer" + + // initial arn look up middleware + err = stack.Initialize.Insert(&s3shared.ARNLookup{ + GetARNValue: options.Accessor.GetBucketFromInput, + }, "legacyEndpointContextSetter", middleware.After) + if err != nil { + return err + } + + // process arn + err = stack.Serialize.Insert(&processARNResource{ + UseARNRegion: options.UseARNRegion, + UseAccelerate: options.UseAccelerate, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointResolverOptions, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }, serializerID, middleware.Before) + if err != nil { + return err + } + + // process whether the operation requires the s3-object-lambda endpoint + // Occurs before operation serializer so that hostPrefix mutations + // can be handled correctly. + err = stack.Serialize.Insert(&s3ObjectLambdaEndpoint{ + UseEndpoint: options.TargetS3ObjectLambda, + UseAccelerate: options.UseAccelerate, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointResolverOptions, + }, serializerID, middleware.Before) + if err != nil { + return err + } + + // remove bucket arn middleware + err = stack.Serialize.Insert(&removeBucketFromPathMiddleware{}, serializerID, middleware.After) + if err != nil { + return err + } + + // update endpoint to use options for path style and accelerate + err = stack.Serialize.Insert(&updateEndpoint{ + usePathStyle: options.UsePathStyle, + getBucketFromInput: options.Accessor.GetBucketFromInput, + useAccelerate: options.UseAccelerate, + supportsAccelerate: options.SupportsAccelerate, + }, serializerID, middleware.After) + if err != nil { + return err + } + + return err +} + +type updateEndpoint struct { + // path style options + usePathStyle bool + getBucketFromInput func(interface{}) (*string, bool) + + // accelerate options + useAccelerate bool + supportsAccelerate bool +} + +// ID returns the middleware ID. 
+func (*updateEndpoint) ID() string { + return "S3:UpdateEndpoint" +} + +func (u *updateEndpoint) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + // if arn was processed, skip this middleware + if _, ok := s3shared.GetARNResourceFromContext(ctx); ok { + return next.HandleSerialize(ctx, in) + } + + // skip this customization if host name is set as immutable + if smithyhttp.GetHostnameImmutable(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + // check if accelerate is supported + if u.useAccelerate && !u.supportsAccelerate { + // accelerate is not supported, thus will be ignored + log.Println("Transfer acceleration is not supported for the operation, ignoring UseAccelerate.") + u.useAccelerate = false + } + + // transfer acceleration is not supported with path style urls + if u.useAccelerate && u.usePathStyle { + log.Println("UseAccelerate is not compatible with UsePathStyle, ignoring UsePathStyle.") + u.usePathStyle = false + } + + if u.getBucketFromInput != nil { + // Below customization only apply if bucket name is provided + bucket, ok := u.getBucketFromInput(in.Parameters) + if ok && bucket != nil { + region := awsmiddleware.GetRegion(ctx) + if err := u.updateEndpointFromConfig(req, *bucket, region); err != nil { + return out, metadata, err + } + } + } + + return next.HandleSerialize(ctx, in) +} + +func (u updateEndpoint) updateEndpointFromConfig(req *smithyhttp.Request, bucket string, region string) error { + // do nothing if path style is enforced + if u.usePathStyle { + return nil + } + + if !hostCompatibleBucketName(req.URL, bucket) { + // bucket name must be valid to put into the host for accelerate operations. + // For non-accelerate operations the bucket name can stay in the path if + // not valid hostname. + var err error + if u.useAccelerate { + err = fmt.Errorf("bucket name %s is not compatible with S3", bucket) + } + + // No-Op if not using accelerate. + return err + } + + // accelerate is only supported if use path style is disabled + if u.useAccelerate { + parts := strings.Split(req.URL.Host, ".") + if len(parts) < 3 { + return fmt.Errorf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", req.URL.Host) + } + + if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") { + parts[0] = "s3-accelerate" + } + + for i := 1; i+1 < len(parts); i++ { + if strings.EqualFold(parts[i], region) { + parts = append(parts[:i], parts[i+1:]...) + break + } + } + + // construct the url host + req.URL.Host = strings.Join(parts, ".") + } + + // move bucket to follow virtual host style + moveBucketNameToHost(req.URL, bucket) + return nil +} + +// updates endpoint to use virtual host styling +func moveBucketNameToHost(u *url.URL, bucket string) { + u.Host = bucket + "." 
+ u.Host
+	removeBucketFromPath(u, bucket)
+}
+
+// remove bucket from url path
+func removeBucketFromPath(u *url.URL, bucket string) {
+	if strings.HasPrefix(u.Path, "/"+bucket) {
+		// modify url path
+		u.Path = strings.Replace(u.Path, "/"+bucket, "", 1)
+
+		// modify url raw path
+		u.RawPath = strings.Replace(u.RawPath, "/"+httpbinding.EscapePath(bucket, true), "", 1)
+	}
+
+	if u.Path == "" {
+		u.Path = "/"
+	}
+
+	if u.RawPath == "" {
+		u.RawPath = "/"
+	}
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if the bucket is not
+// DNS compatible or the EndpointResolver resolves an aws.Endpoint with
+// HostnameImmutable member set to true.
+//
+// https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Endpoint.HostnameImmutable
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+	// Bucket might be DNS compatible but dots in the hostname will fail
+	// certificate validation, so do not use host-style.
+	if u.Scheme == "https" && strings.Contains(bucket, ".") {
+		return false
+	}
+
+	// if the bucket is DNS compatible
+	return dnsCompatibleBucketName(bucket)
+}
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+	if strings.Contains(bucket, "..") {
+		return false
+	}
+
+	// checks for `^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$` domain mapping
+	if !((bucket[0] > 96 && bucket[0] < 123) || (bucket[0] > 47 && bucket[0] < 58)) {
+		return false
+	}
+
+	for _, c := range bucket[1:] {
+		if !((c > 96 && c < 123) || (c > 47 && c < 58) || c == 46 || c == 45) {
+			return false
+		}
+	}
+
+	// checks for `^(\d+\.){3}\d+$` IP addressing
+	v := strings.SplitN(bucket, ".", -1)
+	if len(v) == 4 {
+		for _, c := range bucket {
+			if !((c > 47 && c < 58) || c == 46) {
+				// we confirm that this is not an IP address
+				return true
+			}
+		}
+		// this is an IP address
+		return false
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..f3e6b0751
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go
@@ -0,0 +1,1056 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+	"github.com/aws/smithy-go/logging"
+	"regexp"
+	"strings"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+	// Logger is a logging implementation that log events should be sent to.
+	Logger logging.Logger
+
+	// LogDeprecated indicates that deprecated endpoints should be logged to the
+	// provided logger.
+	LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather than
+	// using the value passed to the ResolveEndpoint method. This value is used by the
+	// SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+	// name. You must not set this value directly in your application.
+	ResolvedRegion string
+
+	// DisableHTTPS informs the resolver to return an endpoint that does not use the
+	// HTTPS scheme.
+	DisableHTTPS bool
+
+	// UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
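+	// For example, in the aws partition the dual-stack variant resolves
+	// hostnames of the form s3.dualstack.{region}.amazonaws.com, per the
+	// partition table below.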
+ UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver S3 endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "af-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.af-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: 
endpoints.Endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-northeast-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-northeast-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-south-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "aws-global", + }: endpoints.Endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.ca-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + 
Region: "ca-west-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ca-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-central-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-north-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-south-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-west-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "fips-ca-central-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-ca-west-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-2", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-2", + }: endpoints.Endpoint{ + Hostname: 
"s3-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "il-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.il-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.me-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.me-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "s3-external-1", + }: endpoints.Endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "s3.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + 
endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-north-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn", + }, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.cn-northwest-1.amazonaws.com.cn", + }, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "fips-us-iso-east-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-iso-west-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-iso-east-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: 
[]string{"s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-iso-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov", + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.sc2s.sgov.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "fips-us-isob-east-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-isob-east-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + }, + endpoints.EndpointKey{ + Region: "us-isob-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3", "s3v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3", "s3v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3", "s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: 
[]string{"s3", "s3v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "fips-us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, +} + +// GetDNSSuffix returns the dnsSuffix URL component for the given partition id +func GetDNSSuffix(id string, options Options) (string, error) { + variant := transformToSharedOptions(options).GetEndpointVariant() + switch { + case strings.EqualFold(id, "aws"): + switch variant { + case endpoints.DualStackVariant: + return "amazonaws.com", nil + + case endpoints.FIPSVariant: + return "amazonaws.com", nil + + case endpoints.FIPSVariant | endpoints.DualStackVariant: + return "amazonaws.com", nil + + case 0: + return "amazonaws.com", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-cn"): + switch variant { + case endpoints.DualStackVariant: + return "amazonaws.com.cn", nil + + case endpoints.FIPSVariant: + return "amazonaws.com.cn", nil + + case endpoints.FIPSVariant | endpoints.DualStackVariant: + return "api.amazonwebservices.com.cn", nil + + case 0: + return "amazonaws.com.cn", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-iso"): + switch variant { + case endpoints.FIPSVariant: + return "c2s.ic.gov", nil + + case 0: + return "c2s.ic.gov", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-iso-b"): + switch variant { + case endpoints.FIPSVariant: + return "sc2s.sgov.gov", nil + + case 0: + return "sc2s.sgov.gov", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-iso-e"): + switch variant { + case endpoints.FIPSVariant: + return "cloud.adc-e.uk", nil + + case 0: + return 
"cloud.adc-e.uk", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-iso-f"): + switch variant { + case endpoints.FIPSVariant: + return "csp.hci.ic.gov", nil + + case 0: + return "csp.hci.ic.gov", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-us-gov"): + switch variant { + case endpoints.DualStackVariant: + return "amazonaws.com", nil + + case endpoints.FIPSVariant: + return "amazonaws.com", nil + + case endpoints.FIPSVariant | endpoints.DualStackVariant: + return "amazonaws.com", nil + + case 0: + return "amazonaws.com", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + default: + return "", fmt.Errorf("unknown partition") + + } +} + +// GetDNSSuffixFromRegion returns the DNS suffix for the provided region and +// options. +func GetDNSSuffixFromRegion(region string, options Options) (string, error) { + switch { + case partitionRegexp.Aws.MatchString(region): + return GetDNSSuffix("aws", options) + + case partitionRegexp.AwsCn.MatchString(region): + return GetDNSSuffix("aws-cn", options) + + case partitionRegexp.AwsIso.MatchString(region): + return GetDNSSuffix("aws-iso", options) + + case partitionRegexp.AwsIsoB.MatchString(region): + return GetDNSSuffix("aws-iso-b", options) + + case partitionRegexp.AwsIsoE.MatchString(region): + return GetDNSSuffix("aws-iso-e", options) + + case partitionRegexp.AwsIsoF.MatchString(region): + return GetDNSSuffix("aws-iso-f", options) + + case partitionRegexp.AwsUsGov.MatchString(region): + return GetDNSSuffix("aws-us-gov", options) + + default: + return GetDNSSuffix("aws", options) + + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go new file mode 100644 index 000000000..064bcefb4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go @@ -0,0 +1,314 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + "github.com/aws/aws-sdk-go-v2/internal/v4a" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. 
+ ClientLogMode aws.ClientLogMode + + // The threshold ContentLength in bytes for HTTP PUT request to receive {Expect: + // 100-continue} header. Setting to -1 will disable adding the Expect header to + // requests; setting to 0 will set the threshold to default 2MB + ContinueHeaderThresholdBytes int64 + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // Allows you to disable S3 Multi-Region access points feature. + DisableMultiRegionAccessPoints bool + + // Disables this client's usage of Session Auth for S3Express buckets and reverts + // to using conventional SigV4 for those. + DisableS3ExpressSessionAuth *bool + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom + // endpoint, set the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service operation. This should be + // used over the deprecated EndpointResolver. + EndpointResolverV2 EndpointResolverV2 + + // The credentials provider for S3Express requests. + ExpressCredentials ExpressCredentialsProvider + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. If specified in an operation call's + // functional options with a value that is different than the constructed client's + // Options, the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // Allows you to enable arn region support for the service. 
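+	//
+	// When enabled, an access point ARN whose region differs from the client
+	// region (e.g. arn:aws:s3:us-west-2:123456789012:accesspoint/myap used from
+	// a us-east-1 client) is routed to the ARN's region instead of being
+	// rejected; the account id and access point name here are illustrative.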
+ UseARNRegion bool + + // Allows you to enable S3 Accelerate feature. All operations compatible with S3 + // Accelerate will use the accelerate endpoint for requests. Requests not + // compatible will fall back to normal S3 requests. The bucket must be enabled for + // accelerate to be used with S3 client with accelerate enabled. If the bucket is + // not enabled for accelerate an error will be returned. The bucket name must be + // DNS compatible to work with accelerate. + UseAccelerate bool + + // Allows you to enable dual-stack endpoint support for the service. + // + // Deprecated: Set dual-stack by setting UseDualStackEndpoint on + // EndpointResolverOptions. When EndpointResolverOptions' UseDualStackEndpoint + // field is set it overrides this field value. + UseDualstack bool + + // Allows you to enable the client to use path-style addressing, i.e., + // https://s3.amazonaws.com/BUCKET/KEY . By default, the S3 client will use virtual + // hosted bucket addressing when possible( https://BUCKET.s3.amazonaws.com/KEY ). + UsePathStyle bool + + // Signature Version 4a (SigV4a) Signer + httpSignerV4a httpSignerV4a + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient + + // The auth scheme resolver which determines how to authenticate for each + // operation. + AuthSchemeResolver AuthSchemeResolver + + // The list of auth schemes supported by the client. + AuthSchemes []smithyhttp.AuthScheme +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} + +func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { + if schemeID == "aws.auth#sigv4" { + return getSigV4IdentityResolver(o) + } + if schemeID == "com.amazonaws.s3#sigv4express" { + return getExpressIdentityResolver(o) + } + if schemeID == "aws.auth#sigv4a" { + return getSigV4AIdentityResolver(o) + } + if schemeID == "smithy.api#noAuth" { + return &smithyauth.AnonymousIdentityResolver{} + } + return nil +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. 
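+//
+// A usage sketch (illustrative; cfg is an aws.Config assumed to be loaded
+// elsewhere and resolver an EndpointResolverV2 implementation):
+//
+//	client := NewFromConfig(cfg, WithEndpointResolverV2(resolver))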
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} + } + return nil +} + +// WithSigV4SigningName applies an override to the authentication workflow to +// use the given signing name for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing name from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningName(name string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), + middleware.Before, + ) + }) + } +} + +// WithSigV4SigningRegion applies an override to the authentication workflow to +// use the given signing region for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing region from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningRegion(region string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), + middleware.Before, + ) + }) + } +} + +func getSigV4AIdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &v4a.CredentialsProviderAdapter{ + Provider: &v4a.SymmetricCredentialAdaptor{ + SymmetricProvider: o.Credentials, + }, + } + } + return nil +} + +// WithSigV4ASigningRegions applies an override to the authentication workflow to +// use the given signing region set for SigV4A-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing region set from both auth scheme resolution and endpoint +// resolution. 
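+//
+// A usage sketch (illustrative; cfg is an aws.Config assumed to be loaded
+// elsewhere):
+//
+//	client := NewFromConfig(cfg, WithSigV4ASigningRegions([]string{"us-east-1", "us-west-2"}))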
+func WithSigV4ASigningRegions(regions []string) func(*Options) { + fn := func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, + ) { + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, regions) + return next.HandleFinalize(ctx, in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Finalize.Insert( + middleware.FinalizeMiddlewareFunc("withSigV4ASigningRegions", fn), + "Signing", + middleware.Before, + ) + }) + } +} + +func ignoreAnonymousAuth(options *Options) { + if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { + options.Credentials = nil + } +} + +func getExpressIdentityResolver(o Options) smithyauth.IdentityResolver { + if o.ExpressCredentials != nil { + return &s3cust.ExpressIdentityResolver{Provider: o.ExpressCredentials} + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go new file mode 100644 index 000000000..4e34d1a22 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go @@ -0,0 +1,77 @@ +package s3 + +import ( + "context" + "fmt" + "path" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// serializeImmutableHostnameBucketMiddleware handles injecting the bucket name into +// "immutable" hostnames resolved via v1 EndpointResolvers. This CANNOT be done in +// serialization, since v2 endpoint resolution requires removing the {Bucket} path +// segment from all S3 requests. +// +// This will only be done for non-ARN buckets, as the features that use those require +// virtualhost manipulation to function and we previously (pre-ep2) expected the caller +// to handle that in their resolver. 
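+//
+// For example, a bucket named "my_bucket" (underscores are not virtual-host
+// compatible) would be appended to the request path, producing
+// /my_bucket/{key} against the resolved immutable hostname; the bucket name
+// here is illustrative.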
+type serializeImmutableHostnameBucketMiddleware struct { + UsePathStyle bool +} + +func (*serializeImmutableHostnameBucketMiddleware) ID() string { + return "serializeImmutableHostnameBucket" +} + +func (m *serializeImmutableHostnameBucketMiddleware) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + bucket, ok := bucketFromInput(in.Parameters) + if !ok { + return next.HandleSerialize(ctx, in) + } + + // a bucket being un-vhostable will also force us to use path style + usePathStyle := m.UsePathStyle || !awsrulesfn.IsVirtualHostableS3Bucket(bucket, request.URL.Scheme != "https") + + if !smithyhttp.GetHostnameImmutable(ctx) && + !(awsmiddleware.GetRequiresLegacyEndpoints(ctx) && usePathStyle) { + return next.HandleSerialize(ctx, in) + } + + parsedBucket := awsrulesfn.ParseARN(bucket) + + // disallow ARN buckets except for MRAP arns + if parsedBucket != nil && len(parsedBucket.Region) > 0 { + return next.HandleSerialize(ctx, in) + } + + request.URL.Path = path.Join(request.URL.Path, bucket) + request.URL.RawPath = path.Join(request.URL.RawPath, httpbinding.EscapePath(bucket, true)) + + return next.HandleSerialize(ctx, in) +} + +func addSerializeImmutableHostnameBucketMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert( + &serializeImmutableHostnameBucketMiddleware{ + UsePathStyle: options.UsePathStyle, + }, + "OperationSerializer", + middleware.Before, + ) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go new file mode 100644 index 000000000..59524bdcb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go @@ -0,0 +1,12872 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+
+package s3
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/encoding/httpbinding"
+	smithyxml "github.com/aws/smithy-go/encoding/xml"
+	"github.com/aws/smithy-go/middleware"
+	smithytime "github.com/aws/smithy-go/time"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+// [Excerpt: the generated serializers and their HTTP-binding helpers for
+// AbortMultipartUpload, CompleteMultipartUpload, CopyObject, CreateBucket,
+// CreateMultipartUpload, CreateSession, DeleteBucket,
+// DeleteBucketAnalyticsConfiguration, DeleteBucketCors, DeleteBucketEncryption,
+// DeleteBucketIntelligentTieringConfiguration,
+// DeleteBucketInventoryConfiguration, DeleteBucketLifecycle,
+// DeleteBucketMetricsConfiguration, DeleteBucketOwnershipControls,
+// DeleteBucketPolicy, DeleteBucketReplication, DeleteBucketTagging,
+// DeleteBucketWebsite and DeleteObject are omitted here; all follow the
+// pattern shown by DeleteObjects below.]
+
+type awsRestxml_serializeOpDeleteObjects struct {
+}
+
+func (*awsRestxml_serializeOpDeleteObjects) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*DeleteObjectsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?delete&x-id=DeleteObjects")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "POST"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.Delete != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Delete",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentDelete(input.Delete, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err =
restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.BypassGovernanceRetention != nil { + locationName := "X-Amz-Bypass-Governance-Retention" + encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.MFA != nil && len(*v.MFA) > 0 { + locationName := "X-Amz-Mfa" + encoder.SetHeader(locationName).String(*v.MFA) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + return nil +} + +type awsRestxml_serializeOpDeleteObjectTagging struct { +} + +func (*awsRestxml_serializeOpDeleteObjectTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteObjectTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return 
&smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpDeletePublicAccessBlock struct { +} + +func (*awsRestxml_serializeOpDeletePublicAccessBlock) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeletePublicAccessBlockInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketAccelerateConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketAccelerateConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?accelerate") + request.URL.Path = 
smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketAcl struct { +} + +func (*awsRestxml_serializeOpGetBucketAcl) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketAclInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?acl") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketAclInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, encoder 
*httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketAnalyticsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?analytics&x-id=GetBucketAnalyticsConfiguration") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketCors struct { +} + +func (*awsRestxml_serializeOpGetBucketCors) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketCorsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } 
+
+	opPath, opQuery := httpbinding.SplitURI("/?cors")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketEncryption struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketEncryption) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketEncryptionInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?encryption")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncryptionInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering&x-id=GetBucketIntelligentTieringConfiguration")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.Id != nil {
+		encoder.SetQuery("id").String(*v.Id)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketInventoryConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketInventoryConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?inventory&x-id=GetBucketInventoryConfiguration")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Id != nil {
+		encoder.SetQuery("id").String(*v.Id)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketLifecycleConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketLifecycleConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?lifecycle")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketLocation struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketLocation) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketLocationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?location")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketLogging struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketLogging) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketLoggingInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?logging")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLoggingInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketMetricsConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketMetricsConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?metrics&x-id=GetBucketMetricsConfiguration")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Id != nil {
+		encoder.SetQuery("id").String(*v.Id)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketNotificationConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketNotificationConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?notification")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketOwnershipControls struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketOwnershipControls) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketOwnershipControlsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?ownershipControls")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketPolicy struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketPolicy) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketPolicyInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?policy")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketPolicyStatus struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketPolicyStatus) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketPolicyStatusInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?policyStatus")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketReplication struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketReplication) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketReplicationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?replication")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketReplicationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketRequestPayment struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketRequestPayment) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketRequestPaymentInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?requestPayment")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketTagging struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketTagging) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketTaggingInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?tagging")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTaggingInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketVersioning struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketVersioning) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketVersioningInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?versioning")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVersioningInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketWebsite struct { +} + +func (*awsRestxml_serializeOpGetBucketWebsite) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketWebsiteInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?website") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsiteInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetObject struct { +} + +func (*awsRestxml_serializeOpGetObject) ID() string { + return "OperationSerializer" +} + +func (m 
*awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=GetObject") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumMode) > 0 { + locationName := "X-Amz-Checksum-Mode" + encoder.SetHeader(locationName).String(string(v.ChecksumMode)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.IfMatch != nil && len(*v.IfMatch) > 0 { + locationName := "If-Match" + encoder.SetHeader(locationName).String(*v.IfMatch) + } + + if v.IfModifiedSince != nil { + locationName := "If-Modified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince)) + } + + if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 { + locationName := "If-None-Match" + encoder.SetHeader(locationName).String(*v.IfNoneMatch) + } + + if v.IfUnmodifiedSince != nil { + locationName := "If-Unmodified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfUnmodifiedSince)) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.PartNumber != nil { + encoder.SetQuery("partNumber").Integer(*v.PartNumber) + } + + if v.Range != nil && len(*v.Range) > 0 { + locationName := "Range" + encoder.SetHeader(locationName).String(*v.Range) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.ResponseCacheControl != nil { + encoder.SetQuery("response-cache-control").String(*v.ResponseCacheControl) + } + + if 
v.ResponseContentDisposition != nil { + encoder.SetQuery("response-content-disposition").String(*v.ResponseContentDisposition) + } + + if v.ResponseContentEncoding != nil { + encoder.SetQuery("response-content-encoding").String(*v.ResponseContentEncoding) + } + + if v.ResponseContentLanguage != nil { + encoder.SetQuery("response-content-language").String(*v.ResponseContentLanguage) + } + + if v.ResponseContentType != nil { + encoder.SetQuery("response-content-type").String(*v.ResponseContentType) + } + + if v.ResponseExpires != nil { + encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectAcl struct { +} + +func (*awsRestxml_serializeOpGetObjectAcl) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectAclInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Key+}?acl") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectAclInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + 
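+// All of the Get*/Head*/List* serializers in this file share one REST-XML shape:
+// httpbinding.SplitURI carves the operation's static path and query out of a
+// pattern such as "/{Key+}?x-id=GetObject", JoinPath/JoinRawQuery graft them onto
+// the request URL, and the httpbinding.Encoder then binds input members to
+// headers, query parameters, and URI labels. Application code never calls these
+// middlewares directly; a minimal usage sketch of the public entry point follows
+// (cfg, bucket, and key are hypothetical placeholders, not values from this change):
+//
+//	client := s3.NewFromConfig(cfg)
+//	obj, err := client.GetObject(ctx, &s3.GetObjectInput{
+//		Bucket: aws.String(bucket),
+//		Key:    aws.String(key),
+//	})
+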
+type awsRestxml_serializeOpGetObjectAcl struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectAcl) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetObjectAclInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}?acl")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetObjectAclInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	if v.VersionId != nil {
+		encoder.SetQuery("versionId").String(*v.VersionId)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetObjectAttributes struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectAttributes) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetObjectAttributesInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}?attributes")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttributesInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	if v.MaxParts != nil {
+		locationName := "X-Amz-Max-Parts"
+		encoder.SetHeader(locationName).Integer(*v.MaxParts)
+	}
+
+	if v.ObjectAttributes != nil {
+		locationName := "X-Amz-Object-Attributes"
+		for i := range v.ObjectAttributes {
+			if len(v.ObjectAttributes[i]) > 0 {
+				escaped := string(v.ObjectAttributes[i])
+				if strings.Index(string(v.ObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.ObjectAttributes[i]), `"`) != -1 {
+					escaped = strconv.Quote(string(v.ObjectAttributes[i]))
+				}
+
+				encoder.AddHeader(locationName).String(string(escaped))
+			}
+		}
+	}
+
+	if v.PartNumberMarker != nil && len(*v.PartNumberMarker) > 0 {
+		locationName := "X-Amz-Part-Number-Marker"
+		encoder.SetHeader(locationName).String(*v.PartNumberMarker)
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+		encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+	}
+
+	if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+		encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+	}
+
+	if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+		encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+	}
+
+	if v.VersionId != nil {
+		encoder.SetQuery("versionId").String(*v.VersionId)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetObjectLegalHold struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectLegalHold) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetObjectLegalHoldInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}?legal-hold")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(v *GetObjectLegalHoldInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	if v.VersionId != nil {
+		encoder.SetQuery("versionId").String(*v.VersionId)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetObjectLockConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectLockConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetObjectLockConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?object-lock")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetObjectRetention struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectRetention) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetObjectRetentionInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}?retention")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(v *GetObjectRetentionInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	if v.VersionId != nil {
+		encoder.SetQuery("versionId").String(*v.VersionId)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetObjectTagging struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectTagging) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetObjectTaggingInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(v *GetObjectTaggingInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	if v.VersionId != nil {
+		encoder.SetQuery("versionId").String(*v.VersionId)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetObjectTorrent struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectTorrent) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetObjectTorrentInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}?torrent")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(v *GetObjectTorrentInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	return nil
+}
+
if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + return nil +} + +type awsRestxml_serializeOpGetPublicAccessBlock struct { +} + +func (*awsRestxml_serializeOpGetPublicAccessBlock) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetPublicAccessBlockInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(v *GetPublicAccessBlockInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpHeadBucket struct { +} + +func (*awsRestxml_serializeOpHeadBucket) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*HeadBucketInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "HEAD" + var restEncoder 
*httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsHeadBucketInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsHeadBucketInput(v *HeadBucketInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpHeadObject struct { +} + +func (*awsRestxml_serializeOpHeadObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpHeadObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*HeadObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Key+}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "HEAD" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsHeadObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumMode) > 0 { + locationName := "X-Amz-Checksum-Mode" + encoder.SetHeader(locationName).String(string(v.ChecksumMode)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + 
encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.IfMatch != nil && len(*v.IfMatch) > 0 { + locationName := "If-Match" + encoder.SetHeader(locationName).String(*v.IfMatch) + } + + if v.IfModifiedSince != nil { + locationName := "If-Modified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince)) + } + + if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 { + locationName := "If-None-Match" + encoder.SetHeader(locationName).String(*v.IfNoneMatch) + } + + if v.IfUnmodifiedSince != nil { + locationName := "If-Unmodified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfUnmodifiedSince)) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.PartNumber != nil { + encoder.SetQuery("partNumber").Integer(*v.PartNumber) + } + + if v.Range != nil && len(*v.Range) > 0 { + locationName := "Range" + encoder.SetHeader(locationName).String(*v.Range) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpListBucketAnalyticsConfigurations struct { +} + +func (*awsRestxml_serializeOpListBucketAnalyticsConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBucketAnalyticsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketAnalyticsConfigurationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?analytics&x-id=ListBucketAnalyticsConfigurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpListBucketIntelligentTieringConfigurations struct { +} + +func (*awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketIntelligentTieringConfigurationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering&x-id=ListBucketIntelligentTieringConfigurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + return nil +} + +type awsRestxml_serializeOpListBucketInventoryConfigurations struct { +} + +func 
(*awsRestxml_serializeOpListBucketInventoryConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBucketInventoryConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketInventoryConfigurationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?inventory&x-id=ListBucketInventoryConfigurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpListBucketMetricsConfigurations struct { +} + +func (*awsRestxml_serializeOpListBucketMetricsConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBucketMetricsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketMetricsConfigurationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?metrics&x-id=ListBucketMetricsConfigurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + 
var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpListBuckets struct { +} + +func (*awsRestxml_serializeOpListBuckets) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?x-id=ListBuckets") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +type awsRestxml_serializeOpListDirectoryBuckets struct { +} + +func (*awsRestxml_serializeOpListDirectoryBuckets) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListDirectoryBuckets) 
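+// The List* serializers bind pagination inputs straight onto the query string
+// (continuation-token, max-keys, and so on); the SDK's paginator helpers simply
+// feed each response token back into the next request. A minimal sketch, assuming
+// a configured *s3.Client and a hypothetical bucket name:
+//
+//	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		// handle page.Contents / err
+//	}
+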
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListDirectoryBucketsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?x-id=ListDirectoryBuckets") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListDirectoryBucketsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListDirectoryBucketsInput(v *ListDirectoryBucketsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.MaxDirectoryBuckets != nil { + encoder.SetQuery("max-directory-buckets").Integer(*v.MaxDirectoryBuckets) + } + + return nil +} + +type awsRestxml_serializeOpListMultipartUploads struct { +} + +func (*awsRestxml_serializeOpListMultipartUploads) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListMultipartUploads) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListMultipartUploadsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?uploads") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != 
nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(v *ListMultipartUploadsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Delimiter != nil { + encoder.SetQuery("delimiter").String(*v.Delimiter) + } + + if len(v.EncodingType) > 0 { + encoder.SetQuery("encoding-type").String(string(v.EncodingType)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.KeyMarker != nil { + encoder.SetQuery("key-marker").String(*v.KeyMarker) + } + + if v.MaxUploads != nil { + encoder.SetQuery("max-uploads").Integer(*v.MaxUploads) + } + + if v.Prefix != nil { + encoder.SetQuery("prefix").String(*v.Prefix) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.UploadIdMarker != nil { + encoder.SetQuery("upload-id-marker").String(*v.UploadIdMarker) + } + + return nil +} + +type awsRestxml_serializeOpListObjects struct { +} + +func (*awsRestxml_serializeOpListObjects) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListObjectsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListObjectsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported 
serialization of nil %T", v) + } + + if v.Delimiter != nil { + encoder.SetQuery("delimiter").String(*v.Delimiter) + } + + if len(v.EncodingType) > 0 { + encoder.SetQuery("encoding-type").String(string(v.EncodingType)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Marker != nil { + encoder.SetQuery("marker").String(*v.Marker) + } + + if v.MaxKeys != nil { + encoder.SetQuery("max-keys").Integer(*v.MaxKeys) + } + + if v.OptionalObjectAttributes != nil { + locationName := "X-Amz-Optional-Object-Attributes" + for i := range v.OptionalObjectAttributes { + if len(v.OptionalObjectAttributes[i]) > 0 { + escaped := string(v.OptionalObjectAttributes[i]) + if strings.Index(string(v.OptionalObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.OptionalObjectAttributes[i]), `"`) != -1 { + escaped = strconv.Quote(string(v.OptionalObjectAttributes[i])) + } + + encoder.AddHeader(locationName).String(string(escaped)) + } + } + } + + if v.Prefix != nil { + encoder.SetQuery("prefix").String(*v.Prefix) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + return nil +} + +type awsRestxml_serializeOpListObjectsV2 struct { +} + +func (*awsRestxml_serializeOpListObjectsV2) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListObjectsV2) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListObjectsV2Input) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?list-type=2") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListObjectsV2Input(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.Delimiter != nil { + 
encoder.SetQuery("delimiter").String(*v.Delimiter) + } + + if len(v.EncodingType) > 0 { + encoder.SetQuery("encoding-type").String(string(v.EncodingType)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.FetchOwner != nil { + encoder.SetQuery("fetch-owner").Boolean(*v.FetchOwner) + } + + if v.MaxKeys != nil { + encoder.SetQuery("max-keys").Integer(*v.MaxKeys) + } + + if v.OptionalObjectAttributes != nil { + locationName := "X-Amz-Optional-Object-Attributes" + for i := range v.OptionalObjectAttributes { + if len(v.OptionalObjectAttributes[i]) > 0 { + escaped := string(v.OptionalObjectAttributes[i]) + if strings.Index(string(v.OptionalObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.OptionalObjectAttributes[i]), `"`) != -1 { + escaped = strconv.Quote(string(v.OptionalObjectAttributes[i])) + } + + encoder.AddHeader(locationName).String(string(escaped)) + } + } + } + + if v.Prefix != nil { + encoder.SetQuery("prefix").String(*v.Prefix) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.StartAfter != nil { + encoder.SetQuery("start-after").String(*v.StartAfter) + } + + return nil +} + +type awsRestxml_serializeOpListObjectVersions struct { +} + +func (*awsRestxml_serializeOpListObjectVersions) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListObjectVersions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListObjectVersionsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?versions") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVersionsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Delimiter != nil { + encoder.SetQuery("delimiter").String(*v.Delimiter) + } + + if len(v.EncodingType) > 
+		encoder.SetQuery("encoding-type").String(string(v.EncodingType))
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.KeyMarker != nil {
+		encoder.SetQuery("key-marker").String(*v.KeyMarker)
+	}
+
+	if v.MaxKeys != nil {
+		encoder.SetQuery("max-keys").Integer(*v.MaxKeys)
+	}
+
+	if v.OptionalObjectAttributes != nil {
+		locationName := "X-Amz-Optional-Object-Attributes"
+		for i := range v.OptionalObjectAttributes {
+			if len(v.OptionalObjectAttributes[i]) > 0 {
+				escaped := string(v.OptionalObjectAttributes[i])
+				if strings.Index(string(v.OptionalObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.OptionalObjectAttributes[i]), `"`) != -1 {
+					escaped = strconv.Quote(string(v.OptionalObjectAttributes[i]))
+				}
+
+				encoder.AddHeader(locationName).String(string(escaped))
+			}
+		}
+	}
+
+	if v.Prefix != nil {
+		encoder.SetQuery("prefix").String(*v.Prefix)
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	if v.VersionIdMarker != nil {
+		encoder.SetQuery("version-id-marker").String(*v.VersionIdMarker)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpListParts struct {
+}
+
+func (*awsRestxml_serializeOpListParts) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListParts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListPartsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=ListParts")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsListPartsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Key == nil || len(*v.Key) == 0 {
return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.MaxParts != nil { + encoder.SetQuery("max-parts").Integer(*v.MaxParts) + } + + if v.PartNumberMarker != nil { + encoder.SetQuery("part-number-marker").String(*v.PartNumberMarker) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.UploadId != nil { + encoder.SetQuery("uploadId").String(*v.UploadId) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketAccelerateConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketAccelerateConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketAccelerateConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?accelerate") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.AccelerateConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccelerateConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err 
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if len(v.ChecksumAlgorithm) > 0 {
+		locationName := "X-Amz-Sdk-Checksum-Algorithm"
+		encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpPutBucketAcl struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketAcl) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketAclInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?acl")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "PUT"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketAclInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.AccessControlPolicy != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "AccessControlPolicy",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentAccessControlPolicy(input.AccessControlPolicy, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketAclInput(v *PutBucketAclInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + locationName := "X-Amz-Grant-Write" + encoder.SetHeader(locationName).String(*v.GrantWrite) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketAnalyticsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketAnalyticsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?analytics") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + 
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.AnalyticsConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "AnalyticsConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentAnalyticsConfiguration(input.AnalyticsConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Id != nil {
+		encoder.SetQuery("id").String(*v.Id)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpPutBucketCors struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketCors) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketCorsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?cors")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "PUT"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.CORSConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "CORSConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentCORSConfiguration(input.CORSConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(v *PutBucketCorsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if len(v.ChecksumAlgorithm) > 0 {
+		locationName := "X-Amz-Sdk-Checksum-Algorithm"
+		encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpPutBucketEncryption struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketEncryption) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketEncryptionInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?encryption")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "PUT"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.ServerSideEncryptionConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ServerSideEncryptionConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentServerSideEncryptionConfiguration(input.ServerSideEncryptionConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(v *PutBucketEncryptionInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if len(v.ChecksumAlgorithm) > 0 {
+		locationName := "X-Amz-Sdk-Checksum-Algorithm"
+		encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketIntelligentTieringConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "PUT"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.IntelligentTieringConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "IntelligentTieringConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentIntelligentTieringConfiguration(input.IntelligentTieringConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.Id != nil {
+		encoder.SetQuery("id").String(*v.Id)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpPutBucketInventoryConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketInventoryConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketInventoryConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?inventory")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "PUT"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.InventoryConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "InventoryConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentInventoryConfiguration(input.InventoryConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Id != nil {
+		encoder.SetQuery("id").String(*v.Id)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpPutBucketLifecycleConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketLifecycleConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketLifecycleConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?lifecycle")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "PUT"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.LifecycleConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "LifecycleConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentBucketLifecycleConfiguration(input.LifecycleConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if len(v.ChecksumAlgorithm) > 0 {
+		locationName := "X-Amz-Sdk-Checksum-Algorithm"
+		encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpPutBucketLogging struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketLogging) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketLoggingInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?logging")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "PUT"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.BucketLoggingStatus != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "BucketLoggingStatus",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentBucketLoggingStatus(input.BucketLoggingStatus, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(v *PutBucketLoggingInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if len(v.ChecksumAlgorithm) > 0 {
+		locationName := "X-Amz-Sdk-Checksum-Algorithm"
+		encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpPutBucketMetricsConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketMetricsConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketMetricsConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?metrics")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "PUT"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.MetricsConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "MetricsConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentMetricsConfiguration(input.MetricsConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Id != nil {
+		encoder.SetQuery("id").String(*v.Id)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpPutBucketNotificationConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketNotificationConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketNotificationConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?notification")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "PUT"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.NotificationConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "NotificationConfiguration",
"NotificationConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentNotificationConfiguration(input.NotificationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.SkipDestinationValidation != nil { + locationName := "X-Amz-Skip-Destination-Validation" + encoder.SetHeader(locationName).Boolean(*v.SkipDestinationValidation) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketOwnershipControls struct { +} + +func (*awsRestxml_serializeOpPutBucketOwnershipControls) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketOwnershipControlsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?ownershipControls") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.OwnershipControls != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OwnershipControls", + }, + Attr: 
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentOwnershipControls(input.OwnershipControls, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpPutBucketPolicy struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketPolicy) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketPolicyInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?policy")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "PUT"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if !restEncoder.HasHeader("Content-Type") {
+		ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+		restEncoder.SetHeader("Content-Type").String("text/plain")
+	}
+
+	if input.Policy != nil {
+		payload := strings.NewReader(*input.Policy)
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if len(v.ChecksumAlgorithm) > 0 {
+		locationName := "X-Amz-Sdk-Checksum-Algorithm"
+		encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+	}
+
+	if v.ConfirmRemoveSelfBucketAccess != nil {
+		locationName := "X-Amz-Confirm-Remove-Self-Bucket-Access"
+		encoder.SetHeader(locationName).Boolean(*v.ConfirmRemoveSelfBucketAccess)
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpPutBucketReplication struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketReplication) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketReplicationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?replication")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "PUT"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.ReplicationConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ReplicationConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentReplicationConfiguration(input.ReplicationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+		payload := bytes.NewReader(xmlEncoder.Bytes())
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketReplicationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if len(v.ChecksumAlgorithm) > 0 {
+		locationName := "X-Amz-Sdk-Checksum-Algorithm"
+		encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+	}
+
+	if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+		locationName := "Content-Md5"
+		encoder.SetHeader(locationName).String(*v.ContentMD5)
+	}
+
+	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	if v.Token != nil && len(*v.Token) > 0 {
+		locationName := "X-Amz-Bucket-Object-Lock-Token"
+		encoder.SetHeader(locationName).String(*v.Token)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpPutBucketRequestPayment struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketRequestPayment) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutBucketRequestPaymentInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?requestPayment")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "PUT"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if input.RequestPaymentConfiguration != nil {
+		if !restEncoder.HasHeader("Content-Type") {
+			ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+			restEncoder.SetHeader("Content-Type").String("application/xml")
+		}
+
+		xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+		payloadRootAttr := []smithyxml.Attr{}
+		payloadRoot := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "RequestPaymentConfiguration",
+			},
+			Attr: payloadRootAttr,
+		}
+		payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+		if err := awsRestxml_serializeDocumentRequestPaymentConfiguration(input.RequestPaymentConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
&smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketTagging struct { +} + +func (*awsRestxml_serializeOpPutBucketTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Tagging != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tagging", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentTagging(input.Tagging, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload 
:= bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketVersioning struct { +} + +func (*awsRestxml_serializeOpPutBucketVersioning) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketVersioningInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?versioning") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.VersioningConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "VersioningConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentVersioningConfiguration(input.VersioningConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: 
err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(v *PutBucketVersioningInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.MFA != nil && len(*v.MFA) > 0 { + locationName := "X-Amz-Mfa" + encoder.SetHeader(locationName).String(*v.MFA) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketWebsite struct { +} + +func (*awsRestxml_serializeOpPutBucketWebsite) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketWebsiteInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?website") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.WebsiteConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "WebsiteConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := 
awsRestxml_serializeDocumentWebsiteConfiguration(input.WebsiteConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(v *PutBucketWebsiteInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutObject struct { +} + +func (*awsRestxml_serializeOpPutObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=PutObject") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/octet-stream") + } + + if input.Body != nil { + payload := input.Body + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func 
awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if v.BucketKeyEnabled != nil { + locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" + encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) + } + + if v.CacheControl != nil && len(*v.CacheControl) > 0 { + locationName := "Cache-Control" + encoder.SetHeader(locationName).String(*v.CacheControl) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + locationName := "X-Amz-Checksum-Crc32" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32) + } + + if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + locationName := "X-Amz-Checksum-Crc32c" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) + } + + if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + locationName := "X-Amz-Checksum-Sha1" + encoder.SetHeader(locationName).String(*v.ChecksumSHA1) + } + + if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + locationName := "X-Amz-Checksum-Sha256" + encoder.SetHeader(locationName).String(*v.ChecksumSHA256) + } + + if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + locationName := "Content-Disposition" + encoder.SetHeader(locationName).String(*v.ContentDisposition) + } + + if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + locationName := "Content-Encoding" + encoder.SetHeader(locationName).String(*v.ContentEncoding) + } + + if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + locationName := "Content-Language" + encoder.SetHeader(locationName).String(*v.ContentLanguage) + } + + if v.ContentLength != nil { + locationName := "Content-Length" + encoder.SetHeader(locationName).Long(*v.ContentLength) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ContentType != nil && len(*v.ContentType) > 0 { + locationName := "Content-Type" + encoder.SetHeader(locationName).String(*v.ContentType) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Expires != nil { + locationName := "Expires" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := 
encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.Metadata != nil { + hv := encoder.Headers("X-Amz-Meta-") + for mapKey, mapVal := range v.Metadata { + if len(mapVal) > 0 { + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) + } + } + } + + if len(v.ObjectLockLegalHoldStatus) > 0 { + locationName := "X-Amz-Object-Lock-Legal-Hold" + encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) + } + + if len(v.ObjectLockMode) > 0 { + locationName := "X-Amz-Object-Lock-Mode" + encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) + } + + if v.ObjectLockRetainUntilDate != nil { + locationName := "X-Amz-Object-Lock-Retain-Until-Date" + encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if len(v.ServerSideEncryption) > 0 { + locationName := "X-Amz-Server-Side-Encryption" + encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Context" + encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) + } + + if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" + encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) + } + + if len(v.StorageClass) > 0 { + locationName := "X-Amz-Storage-Class" + encoder.SetHeader(locationName).String(string(v.StorageClass)) + } + + if v.Tagging != nil && len(*v.Tagging) > 0 { + locationName := "X-Amz-Tagging" + encoder.SetHeader(locationName).String(*v.Tagging) + } + + if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + locationName := "X-Amz-Website-Redirect-Location" + encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectAcl struct { +} + +func (*awsRestxml_serializeOpPutObjectAcl) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectAclInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Key+}?acl") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = 
smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectAclInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.AccessControlPolicy != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessControlPolicy", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentAccessControlPolicy(input.AccessControlPolicy, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + locationName := "X-Amz-Grant-Write" + encoder.SetHeader(locationName).String(*v.GrantWrite) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + if v.Key == nil || len(*v.Key) == 0 { + 
return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectLegalHold struct { +} + +func (*awsRestxml_serializeOpPutObjectLegalHold) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectLegalHoldInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Key+}?legal-hold") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.LegalHold != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "LegalHold", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentObjectLockLegalHold(input.LegalHold, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegalHoldInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + 
encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectLockConfiguration struct { +} + +func (*awsRestxml_serializeOpPutObjectLockConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectLockConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?object-lock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.ObjectLockConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectLockConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentObjectLockConfiguration(input.ObjectLockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: 
err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.Token != nil && len(*v.Token) > 0 { + locationName := "X-Amz-Bucket-Object-Lock-Token" + encoder.SetHeader(locationName).String(*v.Token) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectRetention struct { +} + +func (*awsRestxml_serializeOpPutObjectRetention) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectRetentionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Key+}?retention") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Retention != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Retention", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentObjectLockRetention(input.Retention, 
xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectRetentionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.BypassGovernanceRetention != nil { + locationName := "X-Amz-Bypass-Governance-Retention" + encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectTagging struct { +} + +func (*awsRestxml_serializeOpPutObjectTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Tagging != nil 
{ + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tagging", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentTagging(input.Tagging, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpPutPublicAccessBlock struct { +} + +func (*awsRestxml_serializeOpPutPublicAccessBlock) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutPublicAccessBlockInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = 
smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.PublicAccessBlockConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "PublicAccessBlockConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentPublicAccessBlockConfiguration(input.PublicAccessBlockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAccessBlockInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpRestoreObject struct { +} + +func (*awsRestxml_serializeOpRestoreObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RestoreObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore&x-id=RestoreObject") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, 
request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsRestoreObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.RestoreRequest != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RestoreRequest", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentRestoreRequest(input.RestoreRequest, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpSelectObjectContent struct { +} + +func (*awsRestxml_serializeOpSelectObjectContent) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SelectObjectContentInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := 
httpbinding.SplitURI("/{Key+}?select&select-type=2&x-id=SelectObjectContent") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/xml") + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SelectObjectContentRequest", + }, + Attr: rootAttr, + } + root.Attr = append(root.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeOpDocumentSelectObjectContentInput(input, xmlEncoder.RootElement(root)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + if request, err = request.SetStream(bytes.NewReader(xmlEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectContentInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + return nil +} + +func awsRestxml_serializeOpDocumentSelectObjectContentInput(v *SelectObjectContentInput, value smithyxml.Value) error { + defer value.Close() + if v.Expression != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Expression", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Expression) + } + if len(v.ExpressionType) > 0 { + rootAttr := 
[]smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ExpressionType", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.ExpressionType)) + } + if v.InputSerialization != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "InputSerialization", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInputSerialization(v.InputSerialization, el); err != nil { + return err + } + } + if v.OutputSerialization != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OutputSerialization", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentOutputSerialization(v.OutputSerialization, el); err != nil { + return err + } + } + if v.RequestProgress != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RequestProgress", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentRequestProgress(v.RequestProgress, el); err != nil { + return err + } + } + if v.ScanRange != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ScanRange", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentScanRange(v.ScanRange, el); err != nil { + return err + } + } + return nil +} + +type awsRestxml_serializeOpUploadPart struct { +} + +func (*awsRestxml_serializeOpUploadPart) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UploadPartInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=UploadPart") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsUploadPartInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/octet-stream") + } + + if input.Body != nil { + payload := input.Body + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = 
restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + locationName := "X-Amz-Checksum-Crc32" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32) + } + + if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + locationName := "X-Amz-Checksum-Crc32c" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) + } + + if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + locationName := "X-Amz-Checksum-Sha1" + encoder.SetHeader(locationName).String(*v.ChecksumSHA1) + } + + if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + locationName := "X-Amz-Checksum-Sha256" + encoder.SetHeader(locationName).String(*v.ChecksumSHA256) + } + + if v.ContentLength != nil { + locationName := "Content-Length" + encoder.SetHeader(locationName).Long(*v.ContentLength) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.PartNumber != nil { + encoder.SetQuery("partNumber").Integer(*v.PartNumber) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.UploadId != nil { + encoder.SetQuery("uploadId").String(*v.UploadId) + } + + return nil +} + +type awsRestxml_serializeOpUploadPartCopy struct { +} + +func (*awsRestxml_serializeOpUploadPartCopy) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UploadPartCopyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=UploadPartCopy") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.CopySource != nil && len(*v.CopySource) > 0 { + locationName := "X-Amz-Copy-Source" + encoder.SetHeader(locationName).String(*v.CopySource) + } + + if v.CopySourceIfMatch != nil && len(*v.CopySourceIfMatch) > 0 { + locationName := "X-Amz-Copy-Source-If-Match" + encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) + } + + if v.CopySourceIfModifiedSince != nil { + locationName := "X-Amz-Copy-Source-If-Modified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) + } + + if v.CopySourceIfNoneMatch != nil && len(*v.CopySourceIfNoneMatch) > 0 { + locationName := "X-Amz-Copy-Source-If-None-Match" + encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) + } + + if v.CopySourceIfUnmodifiedSince != nil { + locationName := "X-Amz-Copy-Source-If-Unmodified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) + } + + if v.CopySourceRange != nil && len(*v.CopySourceRange) > 0 { + locationName := "X-Amz-Copy-Source-Range" + encoder.SetHeader(locationName).String(*v.CopySourceRange) + } + + if v.CopySourceSSECustomerAlgorithm != nil && len(*v.CopySourceSSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) + } + + if v.CopySourceSSECustomerKey != nil && len(*v.CopySourceSSECustomerKey) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) + } + + if v.CopySourceSSECustomerKeyMD5 != nil && len(*v.CopySourceSSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.ExpectedSourceBucketOwner != nil && len(*v.ExpectedSourceBucketOwner) > 0 { + locationName := "X-Amz-Source-Expected-Bucket-Owner" 
+		encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner)
+	}
+
+	if v.Key == nil || len(*v.Key) == 0 {
+		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+	}
+	if v.Key != nil {
+		if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+			return err
+		}
+	}
+
+	if v.PartNumber != nil {
+		encoder.SetQuery("partNumber").Integer(*v.PartNumber)
+	}
+
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
+	if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+		encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+	}
+
+	if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+		encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+	}
+
+	if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+		locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+		encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+	}
+
+	if v.UploadId != nil {
+		encoder.SetQuery("uploadId").String(*v.UploadId)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpWriteGetObjectResponse struct {
+}
+
+func (*awsRestxml_serializeOpWriteGetObjectResponse) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*WriteGetObjectResponseInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse?x-id=WriteGetObjectResponse")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "POST"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if !restEncoder.HasHeader("Content-Type") {
+		ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+		restEncoder.SetHeader("Content-Type").String("application/octet-stream")
+	}
+
+	if input.Body != nil {
+		payload := input.Body
+		if request, err = request.SetStream(payload); err != nil {
+			return out, metadata, &smithy.SerializationError{Err: err}
+		}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetObjectResponseInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.AcceptRanges != nil && len(*v.AcceptRanges) > 0 {
+		locationName := "X-Amz-Fwd-Header-Accept-Ranges"
+		encoder.SetHeader(locationName).String(*v.AcceptRanges)
+	}
+
+	if v.BucketKeyEnabled != nil {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Bucket-Key-Enabled"
+		encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled)
+	}
+
+	if v.CacheControl != nil && len(*v.CacheControl) > 0 {
+		locationName := "X-Amz-Fwd-Header-Cache-Control"
+		encoder.SetHeader(locationName).String(*v.CacheControl)
+	}
+
+	if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32"
+		encoder.SetHeader(locationName).String(*v.ChecksumCRC32)
+	}
+
+	if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32c"
+		encoder.SetHeader(locationName).String(*v.ChecksumCRC32C)
+	}
+
+	if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha1"
+		encoder.SetHeader(locationName).String(*v.ChecksumSHA1)
+	}
+
+	if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha256"
+		encoder.SetHeader(locationName).String(*v.ChecksumSHA256)
+	}
+
+	if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 {
+		locationName := "X-Amz-Fwd-Header-Content-Disposition"
+		encoder.SetHeader(locationName).String(*v.ContentDisposition)
+	}
+
+	if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 {
+		locationName := "X-Amz-Fwd-Header-Content-Encoding"
+		encoder.SetHeader(locationName).String(*v.ContentEncoding)
+	}
+
+	if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 {
+		locationName := "X-Amz-Fwd-Header-Content-Language"
+		encoder.SetHeader(locationName).String(*v.ContentLanguage)
+	}
+
+	if v.ContentLength != nil {
+		locationName := "Content-Length"
+		encoder.SetHeader(locationName).Long(*v.ContentLength)
+	}
+
+	if v.ContentRange != nil && len(*v.ContentRange) > 0 {
+		locationName := "X-Amz-Fwd-Header-Content-Range"
+		encoder.SetHeader(locationName).String(*v.ContentRange)
+	}
+
+	if v.ContentType != nil && len(*v.ContentType) > 0 {
+		locationName := "X-Amz-Fwd-Header-Content-Type"
+		encoder.SetHeader(locationName).String(*v.ContentType)
+	}
+
+	if v.DeleteMarker != nil {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Delete-Marker"
+		encoder.SetHeader(locationName).Boolean(*v.DeleteMarker)
+	}
+
+	if v.ErrorCode != nil && len(*v.ErrorCode) > 0 {
+		locationName := "X-Amz-Fwd-Error-Code"
+		encoder.SetHeader(locationName).String(*v.ErrorCode)
+	}
+
+	if v.ErrorMessage != nil && len(*v.ErrorMessage) > 0 {
+		locationName := "X-Amz-Fwd-Error-Message"
+		encoder.SetHeader(locationName).String(*v.ErrorMessage)
+	}
+
+	if v.ETag != nil && len(*v.ETag) > 0 {
+		locationName := "X-Amz-Fwd-Header-Etag"
+		encoder.SetHeader(locationName).String(*v.ETag)
+	}
+
+	if v.Expiration != nil && len(*v.Expiration) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Expiration"
+		encoder.SetHeader(locationName).String(*v.Expiration)
+	}
+
+	if v.Expires != nil {
+		locationName := "X-Amz-Fwd-Header-Expires"
+		encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires))
+	}
+
+	if v.LastModified != nil {
+		locationName := "X-Amz-Fwd-Header-Last-Modified"
+		encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.LastModified))
+	}
+
+	if v.Metadata != nil {
+		hv := encoder.Headers("X-Amz-Meta-")
+		for mapKey, mapVal := range v.Metadata {
+			if len(mapVal) > 0 {
+				hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal)
+			}
+		}
+	}
+
+	if v.MissingMeta != nil {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Missing-Meta"
+		encoder.SetHeader(locationName).Integer(*v.MissingMeta)
+	}
+
+	if len(v.ObjectLockLegalHoldStatus) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Legal-Hold"
+		encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus))
+	}
+
+	if len(v.ObjectLockMode) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Mode"
+		encoder.SetHeader(locationName).String(string(v.ObjectLockMode))
+	}
+
+	if v.ObjectLockRetainUntilDate != nil {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Retain-Until-Date"
+		encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate))
+	}
+
+	if v.PartsCount != nil {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Mp-Parts-Count"
+		encoder.SetHeader(locationName).Integer(*v.PartsCount)
+	}
+
+	if len(v.ReplicationStatus) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Replication-Status"
+		encoder.SetHeader(locationName).String(string(v.ReplicationStatus))
+	}
+
+	if len(v.RequestCharged) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Request-Charged"
+		encoder.SetHeader(locationName).String(string(v.RequestCharged))
+	}
+
+	if v.RequestRoute != nil && len(*v.RequestRoute) > 0 {
+		locationName := "X-Amz-Request-Route"
+		encoder.SetHeader(locationName).String(*v.RequestRoute)
+	}
+
+	if v.RequestToken != nil && len(*v.RequestToken) > 0 {
+		locationName := "X-Amz-Request-Token"
+		encoder.SetHeader(locationName).String(*v.RequestToken)
+	}
+
+	if v.Restore != nil && len(*v.Restore) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Restore"
+		encoder.SetHeader(locationName).String(*v.Restore)
+	}
+
+	if len(v.ServerSideEncryption) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption"
+		encoder.SetHeader(locationName).String(string(v.ServerSideEncryption))
+	}
+
+	if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Algorithm"
+		encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+	}
+
+	if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+		encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+	}
+
+	if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"
+		encoder.SetHeader(locationName).String(*v.SSEKMSKeyId)
+	}
+
+	if v.StatusCode != nil {
+		locationName := "X-Amz-Fwd-Status"
+		encoder.SetHeader(locationName).Integer(*v.StatusCode)
+	}
+
+	if len(v.StorageClass) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Storage-Class"
+		encoder.SetHeader(locationName).String(string(v.StorageClass))
+	}
+
+	if v.TagCount != nil {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Tagging-Count"
+		encoder.SetHeader(locationName).Integer(*v.TagCount)
+	}
+
+	if v.VersionId != nil && len(*v.VersionId) > 0 {
+		locationName := "X-Amz-Fwd-Header-X-Amz-Version-Id"
+		encoder.SetHeader(locationName).String(*v.VersionId)
+	}
+
+	return nil
+}
+
+func awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v *types.AbortIncompleteMultipartUpload, value smithyxml.Value) error {
+	defer value.Close()
+	if v.DaysAfterInitiation != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "DaysAfterInitiation",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.DaysAfterInitiation)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentAccelerateConfiguration(v *types.AccelerateConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.Status) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Status",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Status))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentAccessControlPolicy(v *types.AccessControlPolicy, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Grants != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "AccessControlList",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentGrants(v.Grants, el); err != nil {
+			return err
+		}
+	}
+	if v.Owner != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Owner",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentOwner(v.Owner, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentAccessControlTranslation(v *types.AccessControlTranslation, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.Owner) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Owner",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Owner))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentAllowedHeaders(v []string, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		am.String(v[i])
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentAllowedMethods(v []string, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		am.String(v[i])
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentAllowedOrigins(v []string, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		am.String(v[i])
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsAndOperator(v *types.AnalyticsAndOperator, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Prefix != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Prefix",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Prefix)
+	}
+	if v.Tags != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Tag",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsConfiguration(v *types.AnalyticsConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Filter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Filter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentAnalyticsFilter(v.Filter, el); err != nil {
+			return err
+		}
+	}
+	if v.Id != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Id",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Id)
+	}
+	if v.StorageClassAnalysis != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "StorageClassAnalysis",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentStorageClassAnalysis(v.StorageClassAnalysis, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsExportDestination(v *types.AnalyticsExportDestination, value smithyxml.Value) error {
+	defer value.Close()
+	if v.S3BucketDestination != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "S3BucketDestination",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v.S3BucketDestination, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsFilter(v types.AnalyticsFilter, value smithyxml.Value) error {
+	defer value.Close()
+	switch uv := v.(type) {
+	case *types.AnalyticsFilterMemberAnd:
+		customMemberNameAttr := []smithyxml.Attr{}
+		customMemberName := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "And",
+			},
+			Attr: customMemberNameAttr,
+		}
+		av := value.MemberElement(customMemberName)
+		if err := awsRestxml_serializeDocumentAnalyticsAndOperator(&uv.Value, av); err != nil {
+			return err
+		}
+
+	case *types.AnalyticsFilterMemberPrefix:
+		customMemberNameAttr := []smithyxml.Attr{}
+		customMemberName := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Prefix",
+			},
+			Attr: customMemberNameAttr,
+		}
+		av := value.MemberElement(customMemberName)
+		av.String(uv.Value)
+
+	case *types.AnalyticsFilterMemberTag:
+		customMemberNameAttr := []smithyxml.Attr{}
+		customMemberName := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Tag",
+			},
+			Attr: customMemberNameAttr,
+		}
+		av := value.MemberElement(customMemberName)
+		if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil {
+			return err
+		}
+
+	default:
+		return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v)
+
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Bucket != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Bucket",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Bucket)
+	}
+	if v.BucketAccountId != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "BucketAccountId",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.BucketAccountId)
+	}
+	if len(v.Format) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Format",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Format))
+	}
+	if v.Prefix != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Prefix",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Prefix)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentBucketInfo(v *types.BucketInfo, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.DataRedundancy) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "DataRedundancy",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.DataRedundancy))
+	}
+	if len(v.Type) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Type",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Type))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Rules != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Rule",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentLifecycleRules(v.Rules, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentBucketLoggingStatus(v *types.BucketLoggingStatus, value smithyxml.Value) error {
+	defer value.Close()
+	if v.LoggingEnabled != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "LoggingEnabled",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentLoggingEnabled(v.LoggingEnabled, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentCompletedMultipartUpload(v *types.CompletedMultipartUpload, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Parts != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Part",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentCompletedPartList(v.Parts, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smithyxml.Value) error {
+	defer value.Close()
+	if v.ChecksumCRC32 != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ChecksumCRC32",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.ChecksumCRC32)
+	}
+	if v.ChecksumCRC32C != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ChecksumCRC32C",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.ChecksumCRC32C)
+	}
+	if v.ChecksumSHA1 != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ChecksumSHA1",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.ChecksumSHA1)
+	}
+	if v.ChecksumSHA256 != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ChecksumSHA256",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.ChecksumSHA256)
+	}
+	if v.ETag != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ETag",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.ETag)
+	}
+	if v.PartNumber != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "PartNumber",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.PartNumber)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentCompletedPartList(v []types.CompletedPart, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentCompletedPart(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentCondition(v *types.Condition, value smithyxml.Value) error {
+	defer value.Close()
+	if v.HttpErrorCodeReturnedEquals != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "HttpErrorCodeReturnedEquals",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.HttpErrorCodeReturnedEquals)
+	}
+	if v.KeyPrefixEquals != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "KeyPrefixEquals",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.KeyPrefixEquals)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentCORSConfiguration(v *types.CORSConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.CORSRules != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "CORSRule",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentCORSRules(v.CORSRules, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentCORSRule(v *types.CORSRule, value smithyxml.Value) error {
+	defer value.Close()
+	if v.AllowedHeaders != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "AllowedHeader",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentAllowedHeaders(v.AllowedHeaders, el); err != nil {
+			return err
+		}
+	}
+	if v.AllowedMethods != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "AllowedMethod",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentAllowedMethods(v.AllowedMethods, el); err != nil {
+			return err
+		}
+	}
+	if v.AllowedOrigins != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "AllowedOrigin",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentAllowedOrigins(v.AllowedOrigins, el); err != nil {
+			return err
+		}
+	}
+	if v.ExposeHeaders != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ExposeHeader",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentExposeHeaders(v.ExposeHeaders, el); err != nil {
+			return err
+		}
+	}
+	if v.ID != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ID",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.ID)
+	}
+	if v.MaxAgeSeconds != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "MaxAgeSeconds",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.MaxAgeSeconds)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentCORSRules(v []types.CORSRule, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentCORSRule(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentCreateBucketConfiguration(v *types.CreateBucketConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Bucket != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Bucket",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentBucketInfo(v.Bucket, el); err != nil {
+			return err
+		}
+	}
+	if v.Location != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Location",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentLocationInfo(v.Location, el); err != nil {
+			return err
+		}
+	}
+	if len(v.LocationConstraint) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "LocationConstraint",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.LocationConstraint))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentCSVInput(v *types.CSVInput, value smithyxml.Value) error {
+	defer value.Close()
+	if v.AllowQuotedRecordDelimiter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "AllowQuotedRecordDelimiter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Boolean(*v.AllowQuotedRecordDelimiter)
+	}
+	if v.Comments != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Comments",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Comments)
+	}
+	if v.FieldDelimiter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "FieldDelimiter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.FieldDelimiter)
+	}
+	if len(v.FileHeaderInfo) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "FileHeaderInfo",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.FileHeaderInfo))
+	}
+	if v.QuoteCharacter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "QuoteCharacter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.QuoteCharacter)
+	}
+	if v.QuoteEscapeCharacter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "QuoteEscapeCharacter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.QuoteEscapeCharacter)
+	}
+	if v.RecordDelimiter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "RecordDelimiter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.RecordDelimiter)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentCSVOutput(v *types.CSVOutput, value smithyxml.Value) error {
+	defer value.Close()
+	if v.FieldDelimiter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "FieldDelimiter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.FieldDelimiter)
+	}
+	if v.QuoteCharacter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "QuoteCharacter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.QuoteCharacter)
+	}
+	if v.QuoteEscapeCharacter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "QuoteEscapeCharacter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.QuoteEscapeCharacter)
+	}
+	if len(v.QuoteFields) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "QuoteFields",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.QuoteFields))
+	}
+	if v.RecordDelimiter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "RecordDelimiter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.RecordDelimiter)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Days != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Days",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.Days)
+	}
+	if len(v.Mode) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Mode",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Mode))
+	}
+	if v.Years != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Years",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.Years)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentDelete(v *types.Delete, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Objects != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Object",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentObjectIdentifierList(v.Objects, el); err != nil {
+			return err
+		}
+	}
+	if v.Quiet != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Quiet",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Boolean(*v.Quiet)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentDeleteMarkerReplication(v *types.DeleteMarkerReplication, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.Status) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Status",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Status))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentDestination(v *types.Destination, value smithyxml.Value) error {
+	defer value.Close()
+	if v.AccessControlTranslation != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "AccessControlTranslation",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentAccessControlTranslation(v.AccessControlTranslation, el); err != nil {
+			return err
+		}
+	}
+	if v.Account != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Account",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Account)
+	}
+	if v.Bucket != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Bucket",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Bucket)
+	}
+	if v.EncryptionConfiguration != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "EncryptionConfiguration",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentEncryptionConfiguration(v.EncryptionConfiguration, el); err != nil {
+			return err
+		}
+	}
+	if v.Metrics != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Metrics",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentMetrics(v.Metrics, el); err != nil {
+			return err
+		}
+	}
+	if v.ReplicationTime != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ReplicationTime",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentReplicationTime(v.ReplicationTime, el); err != nil {
+			return err
+		}
+	}
+	if len(v.StorageClass) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "StorageClass",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.StorageClass))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentEncryption(v *types.Encryption, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.EncryptionType) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "EncryptionType",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.EncryptionType))
+	}
+	if v.KMSContext != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "KMSContext",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.KMSContext)
+	}
+	if v.KMSKeyId != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "KMSKeyId",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.KMSKeyId)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentEncryptionConfiguration(v *types.EncryptionConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.ReplicaKmsKeyID != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ReplicaKmsKeyID",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.ReplicaKmsKeyID)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentErrorDocument(v *types.ErrorDocument, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Key != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Key",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Key)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentEventBridgeConfiguration(v *types.EventBridgeConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	return nil
+}
+
+func awsRestxml_serializeDocumentEventList(v []types.Event, value smithyxml.Value) error {
+	var array *smithyxml.Array
+ if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + am.String(string(v[i])) + } + return nil +} + +func awsRestxml_serializeDocumentExistingObjectReplication(v *types.ExistingObjectReplication, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentExposeHeaders(v []string, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + am.String(v[i]) + } + return nil +} + +func awsRestxml_serializeDocumentFilterRule(v *types.FilterRule, value smithyxml.Value) error { + defer value.Close() + if len(v.Name) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Name", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Name)) + } + if v.Value != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Value", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Value) + } + return nil +} + +func awsRestxml_serializeDocumentFilterRuleList(v []types.FilterRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentFilterRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentGlacierJobParameters(v *types.GlacierJobParameters, value smithyxml.Value) error { + defer value.Close() + if len(v.Tier) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tier", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Tier)) + } + return nil +} + +func awsRestxml_serializeDocumentGrant(v *types.Grant, value smithyxml.Value) error { + defer value.Close() + if v.Grantee != nil { + rootAttr := []smithyxml.Attr{} + rootAttr = append(rootAttr, smithyxml.NewNamespaceAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance")) + if len(v.Grantee.Type) > 0 { + var av string + av = string(v.Grantee.Type) + rootAttr = append(rootAttr, smithyxml.NewAttribute("xsi:type", av)) + } + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Grantee", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentGrantee(v.Grantee, el); err != nil { + return err + } + } + if len(v.Permission) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Permission", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Permission)) + } + return nil +} + +func awsRestxml_serializeDocumentGrantee(v *types.Grantee, value smithyxml.Value) error { + defer value.Close() + if v.DisplayName != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DisplayName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.DisplayName) + } + if v.EmailAddress != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + 
Name: smithyxml.Name{ + Local: "EmailAddress", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.EmailAddress) + } + if v.ID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ID) + } + if v.URI != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "URI", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.URI) + } + return nil +} + +func awsRestxml_serializeDocumentGrants(v []types.Grant, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Grant", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentGrant(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentIndexDocument(v *types.IndexDocument, value smithyxml.Value) error { + defer value.Close() + if v.Suffix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Suffix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Suffix) + } + return nil +} + +func awsRestxml_serializeDocumentInputSerialization(v *types.InputSerialization, value smithyxml.Value) error { + defer value.Close() + if len(v.CompressionType) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CompressionType", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.CompressionType)) + } + if v.CSV != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CSV", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentCSVInput(v.CSV, el); err != nil { + return err + } + } + if v.JSON != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "JSON", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentJSONInput(v.JSON, el); err != nil { + return err + } + } + if v.Parquet != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Parquet", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentParquetInput(v.Parquet, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentIntelligentTieringAndOperator(v *types.IntelligentTieringAndOperator, value smithyxml.Value) error { + defer value.Close() + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tags != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentIntelligentTieringConfiguration(v 
*types.IntelligentTieringConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentIntelligentTieringFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + if v.Tierings != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tiering", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTieringList(v.Tierings, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentIntelligentTieringFilter(v *types.IntelligentTieringFilter, value smithyxml.Value) error { + defer value.Close() + if v.And != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "And", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentIntelligentTieringAndOperator(v.And, el); err != nil { + return err + } + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentInventoryConfiguration(v *types.InventoryConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Destination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Destination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryDestination(v.Destination, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if len(v.IncludedObjectVersions) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "IncludedObjectVersions", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.IncludedObjectVersions)) + } + if v.IsEnabled != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "IsEnabled", + }, + Attr: rootAttr, + } + el := 
value.MemberElement(root) + el.Boolean(*v.IsEnabled) + } + if v.OptionalFields != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OptionalFields", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryOptionalFields(v.OptionalFields, el); err != nil { + return err + } + } + if v.Schedule != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Schedule", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventorySchedule(v.Schedule, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentInventoryDestination(v *types.InventoryDestination, value smithyxml.Value) error { + defer value.Close() + if v.S3BucketDestination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "S3BucketDestination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryS3BucketDestination(v.S3BucketDestination, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentInventoryEncryption(v *types.InventoryEncryption, value smithyxml.Value) error { + defer value.Close() + if v.SSEKMS != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SSE-KMS", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentSSEKMS(v.SSEKMS, el); err != nil { + return err + } + } + if v.SSES3 != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SSE-S3", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentSSES3(v.SSES3, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentInventoryFilter(v *types.InventoryFilter, value smithyxml.Value) error { + defer value.Close() + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + return nil +} + +func awsRestxml_serializeDocumentInventoryOptionalFields(v []types.InventoryOptionalField, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Field", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + am.String(string(v[i])) + } + return nil +} + +func awsRestxml_serializeDocumentInventoryS3BucketDestination(v *types.InventoryS3BucketDestination, value smithyxml.Value) error { + defer value.Close() + if v.AccountId != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccountId", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.AccountId) + } + if v.Bucket != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Bucket", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Bucket) + } + if v.Encryption != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: 
smithyxml.Name{ + Local: "Encryption", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryEncryption(v.Encryption, el); err != nil { + return err + } + } + if len(v.Format) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Format", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Format)) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + return nil +} + +func awsRestxml_serializeDocumentInventorySchedule(v *types.InventorySchedule, value smithyxml.Value) error { + defer value.Close() + if len(v.Frequency) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Frequency", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Frequency)) + } + return nil +} + +func awsRestxml_serializeDocumentJSONInput(v *types.JSONInput, value smithyxml.Value) error { + defer value.Close() + if len(v.Type) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Type", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Type)) + } + return nil +} + +func awsRestxml_serializeDocumentJSONOutput(v *types.JSONOutput, value smithyxml.Value) error { + defer value.Close() + if v.RecordDelimiter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RecordDelimiter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.RecordDelimiter) + } + return nil +} + +func awsRestxml_serializeDocumentLambdaFunctionConfiguration(v *types.LambdaFunctionConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Events != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Event", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if v.LambdaFunctionArn != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CloudFunction", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.LambdaFunctionArn) + } + return nil +} + +func awsRestxml_serializeDocumentLambdaFunctionConfigurationList(v []types.LambdaFunctionConfiguration, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentLambdaFunctionConfiguration(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleExpiration(v 
*types.LifecycleExpiration, value smithyxml.Value) error { + defer value.Close() + if v.Date != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Date", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(smithytime.FormatDateTime(*v.Date)) + } + if v.Days != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Days", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(*v.Days) + } + if v.ExpiredObjectDeleteMarker != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ExpiredObjectDeleteMarker", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(*v.ExpiredObjectDeleteMarker) + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleRule(v *types.LifecycleRule, value smithyxml.Value) error { + defer value.Close() + if v.AbortIncompleteMultipartUpload != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AbortIncompleteMultipartUpload", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v.AbortIncompleteMultipartUpload, el); err != nil { + return err + } + } + if v.Expiration != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Expiration", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentLifecycleExpiration(v.Expiration, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentLifecycleRuleFilter(v.Filter, el); err != nil { + return err + } + } + if v.ID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ID) + } + if v.NoncurrentVersionExpiration != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NoncurrentVersionExpiration", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentNoncurrentVersionExpiration(v.NoncurrentVersionExpiration, el); err != nil { + return err + } + } + if v.NoncurrentVersionTransitions != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NoncurrentVersionTransition", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentNoncurrentVersionTransitionList(v.NoncurrentVersionTransitions, el); err != nil { + return err + } + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + if v.Transitions != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Transition", + }, + Attr: rootAttr, + } + 
el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTransitionList(v.Transitions, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator, value smithyxml.Value) error { + defer value.Close() + if v.ObjectSizeGreaterThan != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectSizeGreaterThan", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Long(*v.ObjectSizeGreaterThan) + } + if v.ObjectSizeLessThan != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectSizeLessThan", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Long(*v.ObjectSizeLessThan) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tags != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleRuleFilter(v types.LifecycleRuleFilter, value smithyxml.Value) error { + defer value.Close() + switch uv := v.(type) { + case *types.LifecycleRuleFilterMemberAnd: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "And", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentLifecycleRuleAndOperator(&uv.Value, av); err != nil { + return err + } + + case *types.LifecycleRuleFilterMemberObjectSizeGreaterThan: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectSizeGreaterThan", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.Long(uv.Value) + + case *types.LifecycleRuleFilterMemberObjectSizeLessThan: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectSizeLessThan", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.Long(uv.Value) + + case *types.LifecycleRuleFilterMemberPrefix: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.String(uv.Value) + + case *types.LifecycleRuleFilterMemberTag: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + return err + } + + default: + return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) + + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleRules(v []types.LifecycleRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := 
+func awsRestxml_serializeDocumentLifecycleRules(v []types.LifecycleRule, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentLifecycleRule(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentLocationInfo(v *types.LocationInfo, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Name != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Name",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Name)
+	}
+	if len(v.Type) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Type",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Type))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentLoggingEnabled(v *types.LoggingEnabled, value smithyxml.Value) error {
+	defer value.Close()
+	if v.TargetBucket != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "TargetBucket",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.TargetBucket)
+	}
+	if v.TargetGrants != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "TargetGrants",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentTargetGrants(v.TargetGrants, el); err != nil {
+			return err
+		}
+	}
+	if v.TargetObjectKeyFormat != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "TargetObjectKeyFormat",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentTargetObjectKeyFormat(v.TargetObjectKeyFormat, el); err != nil {
+			return err
+		}
+	}
+	if v.TargetPrefix != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "TargetPrefix",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.TargetPrefix)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentMetadataEntry(v *types.MetadataEntry, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Name != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Name",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Name)
+	}
+	if v.Value != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Value",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Value)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentMetrics(v *types.Metrics, value smithyxml.Value) error {
+	defer value.Close()
+	if v.EventThreshold != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "EventThreshold",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentReplicationTimeValue(v.EventThreshold, el); err != nil {
+			return err
+		}
+	}
+	if len(v.Status) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Status",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Status))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentMetricsAndOperator(v *types.MetricsAndOperator, value smithyxml.Value) error {
+	defer value.Close()
+	if v.AccessPointArn != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "AccessPointArn",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.AccessPointArn)
+	}
+	if v.Prefix != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Prefix",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Prefix)
+	}
+	if v.Tags != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Tag",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentMetricsConfiguration(v *types.MetricsConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Filter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Filter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentMetricsFilter(v.Filter, el); err != nil {
+			return err
+		}
+	}
+	if v.Id != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Id",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Id)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentMetricsFilter(v types.MetricsFilter, value smithyxml.Value) error {
+	defer value.Close()
+	switch uv := v.(type) {
+	case *types.MetricsFilterMemberAccessPointArn:
+		customMemberNameAttr := []smithyxml.Attr{}
+		customMemberName := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "AccessPointArn",
+			},
+			Attr: customMemberNameAttr,
+		}
+		av := value.MemberElement(customMemberName)
+		av.String(uv.Value)
+
+	case *types.MetricsFilterMemberAnd:
+		customMemberNameAttr := []smithyxml.Attr{}
+		customMemberName := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "And",
+			},
+			Attr: customMemberNameAttr,
+		}
+		av := value.MemberElement(customMemberName)
+		if err := awsRestxml_serializeDocumentMetricsAndOperator(&uv.Value, av); err != nil {
+			return err
+		}
+
+	case *types.MetricsFilterMemberPrefix:
+		customMemberNameAttr := []smithyxml.Attr{}
+		customMemberName := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Prefix",
+			},
+			Attr: customMemberNameAttr,
+		}
+		av := value.MemberElement(customMemberName)
+		av.String(uv.Value)
+
+	case *types.MetricsFilterMemberTag:
+		customMemberNameAttr := []smithyxml.Attr{}
+		customMemberName := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Tag",
+			},
+			Attr: customMemberNameAttr,
+		}
+		av := value.MemberElement(customMemberName)
+		if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil {
+			return err
+		}
+
+	default:
+		return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v)
+
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentNoncurrentVersionExpiration(v *types.NoncurrentVersionExpiration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.NewerNoncurrentVersions != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "NewerNoncurrentVersions",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.NewerNoncurrentVersions)
+	}
+	if v.NoncurrentDays != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "NoncurrentDays",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.NoncurrentDays)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentNoncurrentVersionTransition(v *types.NoncurrentVersionTransition, value smithyxml.Value) error {
+	defer value.Close()
+	if v.NewerNoncurrentVersions != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "NewerNoncurrentVersions",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.NewerNoncurrentVersions)
+	}
+	if v.NoncurrentDays != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "NoncurrentDays",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.NoncurrentDays)
+	}
+	if len(v.StorageClass) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "StorageClass",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.StorageClass))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentNoncurrentVersionTransitionList(v []types.NoncurrentVersionTransition, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentNoncurrentVersionTransition(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentNotificationConfiguration(v *types.NotificationConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.EventBridgeConfiguration != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "EventBridgeConfiguration",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentEventBridgeConfiguration(v.EventBridgeConfiguration, el); err != nil {
+			return err
+		}
+	}
+	if v.LambdaFunctionConfigurations != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "CloudFunctionConfiguration",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentLambdaFunctionConfigurationList(v.LambdaFunctionConfigurations, el); err != nil {
+			return err
+		}
+	}
+	if v.QueueConfigurations != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "QueueConfiguration",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentQueueConfigurationList(v.QueueConfigurations, el); err != nil {
+			return err
+		}
+	}
+	if v.TopicConfigurations != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "TopicConfiguration",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentTopicConfigurationList(v.TopicConfigurations, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentNotificationConfigurationFilter(v *types.NotificationConfigurationFilter, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Key != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "S3Key",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentS3KeyFilter(v.Key, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
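Note the legacy element names here: each configuration list is serialized flattened (one sibling element per entry, no wrapper), and Lambda configurations keep the historical `CloudFunctionConfiguration` name. A minimal caller-side sketch, assuming a hypothetical bucket and SNS topic ARN (the event string is written out rather than relying on a specific enum constant name):

```go
package mapts3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// notifyOnCreate wires object-created events to an SNS topic; each entry in
// TopicConfigurations becomes a flattened <TopicConfiguration> element.
func notifyOnCreate(ctx context.Context, client *s3.Client, bucket, topicArn string) error {
	_, err := client.PutBucketNotificationConfiguration(ctx, &s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String(bucket),
		NotificationConfiguration: &types.NotificationConfiguration{
			TopicConfigurations: []types.TopicConfiguration{{
				TopicArn: aws.String(topicArn),
				Events:   []types.Event{types.Event("s3:ObjectCreated:*")},
			}},
		},
	})
	return err
}
```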
+func awsRestxml_serializeDocumentObjectIdentifier(v *types.ObjectIdentifier, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Key != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Key",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Key)
+	}
+	if v.VersionId != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "VersionId",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.VersionId)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentObjectIdentifierList(v []types.ObjectIdentifier, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentObjectIdentifier(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentObjectLockConfiguration(v *types.ObjectLockConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.ObjectLockEnabled) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ObjectLockEnabled",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.ObjectLockEnabled))
+	}
+	if v.Rule != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Rule",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentObjectLockRule(v.Rule, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentObjectLockLegalHold(v *types.ObjectLockLegalHold, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.Status) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Status",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Status))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentObjectLockRetention(v *types.ObjectLockRetention, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.Mode) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Mode",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Mode))
+	}
+	if v.RetainUntilDate != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "RetainUntilDate",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(smithytime.FormatDateTime(*v.RetainUntilDate))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentObjectLockRule(v *types.ObjectLockRule, value smithyxml.Value) error {
+	defer value.Close()
+	if v.DefaultRetention != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "DefaultRetention",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentDefaultRetention(v.DefaultRetention, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentOutputLocation(v *types.OutputLocation, value smithyxml.Value) error {
+	defer value.Close()
+	if v.S3 != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "S3",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentS3Location(v.S3, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
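`ObjectIdentifier` and its list type back the batch `DeleteObjects` call, which is the usual way to clear out a state prefix before deleting a bucket. A minimal sketch with a hypothetical helper name:

```go
package mapts3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// deleteStateObjects batch-deletes the given keys; each key becomes one
// <Object> element built by awsRestxml_serializeDocumentObjectIdentifier.
func deleteStateObjects(ctx context.Context, client *s3.Client, bucket string, keys []string) error {
	ids := make([]types.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, types.ObjectIdentifier{Key: aws.String(k)})
	}
	_, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &types.Delete{Objects: ids},
	})
	return err
}
```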
+func awsRestxml_serializeDocumentOutputSerialization(v *types.OutputSerialization, value smithyxml.Value) error {
+	defer value.Close()
+	if v.CSV != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "CSV",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentCSVOutput(v.CSV, el); err != nil {
+			return err
+		}
+	}
+	if v.JSON != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "JSON",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentJSONOutput(v.JSON, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentOwner(v *types.Owner, value smithyxml.Value) error {
+	defer value.Close()
+	if v.DisplayName != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "DisplayName",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.DisplayName)
+	}
+	if v.ID != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ID",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.ID)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentOwnershipControls(v *types.OwnershipControls, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Rules != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Rule",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentOwnershipControlsRules(v.Rules, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentOwnershipControlsRule(v *types.OwnershipControlsRule, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.ObjectOwnership) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ObjectOwnership",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.ObjectOwnership))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentOwnershipControlsRules(v []types.OwnershipControlsRule, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentOwnershipControlsRule(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentParquetInput(v *types.ParquetInput, value smithyxml.Value) error {
+	defer value.Close()
+	return nil
+}
+
+func awsRestxml_serializeDocumentPartitionedPrefix(v *types.PartitionedPrefix, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.PartitionDateSource) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "PartitionDateSource",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.PartitionDateSource))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicAccessBlockConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.BlockPublicAcls != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "BlockPublicAcls",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Boolean(*v.BlockPublicAcls)
+	}
+	if v.BlockPublicPolicy != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "BlockPublicPolicy",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Boolean(*v.BlockPublicPolicy)
+	}
+	if v.IgnorePublicAcls != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "IgnorePublicAcls",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Boolean(*v.IgnorePublicAcls)
+	}
+	if v.RestrictPublicBuckets != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "RestrictPublicBuckets",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Boolean(*v.RestrictPublicBuckets)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentQueueConfiguration(v *types.QueueConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Events != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Event",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil {
+			return err
+		}
+	}
+	if v.Filter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Filter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil {
+			return err
+		}
+	}
+	if v.Id != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Id",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Id)
+	}
+	if v.QueueArn != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Queue",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.QueueArn)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentQueueConfigurationList(v []types.QueueConfiguration, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentQueueConfiguration(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
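The `PublicAccessBlockConfiguration` serializer above nil-checks each flag because every field is a `*bool` in this SDK version. A minimal sketch of locking down a bucket (helper name and bucket are hypothetical):

```go
package mapts3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// blockPublicAccess enables all four public-access guards on a bucket;
// each aws.Bool(true) becomes one <BlockPublicAcls>-style element.
func blockPublicAccess(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutPublicAccessBlock(ctx, &s3.PutPublicAccessBlockInput{
		Bucket: aws.String(bucket),
		PublicAccessBlockConfiguration: &types.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	return err
}
```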
+func awsRestxml_serializeDocumentRedirect(v *types.Redirect, value smithyxml.Value) error {
+	defer value.Close()
+	if v.HostName != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "HostName",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.HostName)
+	}
+	if v.HttpRedirectCode != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "HttpRedirectCode",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.HttpRedirectCode)
+	}
+	if len(v.Protocol) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Protocol",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Protocol))
+	}
+	if v.ReplaceKeyPrefixWith != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ReplaceKeyPrefixWith",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.ReplaceKeyPrefixWith)
+	}
+	if v.ReplaceKeyWith != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ReplaceKeyWith",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.ReplaceKeyWith)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentRedirectAllRequestsTo(v *types.RedirectAllRequestsTo, value smithyxml.Value) error {
+	defer value.Close()
+	if v.HostName != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "HostName",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.HostName)
+	}
+	if len(v.Protocol) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Protocol",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Protocol))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentReplicaModifications(v *types.ReplicaModifications, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.Status) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Status",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Status))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentReplicationConfiguration(v *types.ReplicationConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Role != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Role",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Role)
+	}
+	if v.Rules != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Rule",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentReplicationRules(v.Rules, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentReplicationRule(v *types.ReplicationRule, value smithyxml.Value) error {
+	defer value.Close()
+	if v.DeleteMarkerReplication != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "DeleteMarkerReplication",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentDeleteMarkerReplication(v.DeleteMarkerReplication, el); err != nil {
+			return err
+		}
+	}
+	if v.Destination != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Destination",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentDestination(v.Destination, el); err != nil {
+			return err
+		}
+	}
+	if v.ExistingObjectReplication != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ExistingObjectReplication",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentExistingObjectReplication(v.ExistingObjectReplication, el); err != nil {
+			return err
+		}
+	}
+	if v.Filter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Filter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentReplicationRuleFilter(v.Filter, el); err != nil {
+			return err
+		}
+	}
+	if v.ID != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ID",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.ID)
+	}
+	if v.Prefix != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Prefix",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Prefix)
+	}
+	if v.Priority != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Priority",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.Priority)
+	}
+	if v.SourceSelectionCriteria != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "SourceSelectionCriteria",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentSourceSelectionCriteria(v.SourceSelectionCriteria, el); err != nil {
+			return err
+		}
+	}
+	if len(v.Status) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Status",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Status))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Prefix != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Prefix",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Prefix)
+	}
+	if v.Tags != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Tag",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentReplicationRuleFilter(v types.ReplicationRuleFilter, value smithyxml.Value) error {
+	defer value.Close()
+	switch uv := v.(type) {
+	case *types.ReplicationRuleFilterMemberAnd:
+		customMemberNameAttr := []smithyxml.Attr{}
+		customMemberName := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "And",
+			},
+			Attr: customMemberNameAttr,
+		}
+		av := value.MemberElement(customMemberName)
+		if err := awsRestxml_serializeDocumentReplicationRuleAndOperator(&uv.Value, av); err != nil {
+			return err
+		}
+
+	case *types.ReplicationRuleFilterMemberPrefix:
+		customMemberNameAttr := []smithyxml.Attr{}
+		customMemberName := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Prefix",
+			},
+			Attr: customMemberNameAttr,
+		}
+		av := value.MemberElement(customMemberName)
+		av.String(uv.Value)
+
+	case *types.ReplicationRuleFilterMemberTag:
+		customMemberNameAttr := []smithyxml.Attr{}
+		customMemberName := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Tag",
+			},
+			Attr: customMemberNameAttr,
+		}
+		av := value.MemberElement(customMemberName)
+		if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil {
+			return err
+		}
+
+	default:
+		return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v)
+
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentReplicationRules(v []types.ReplicationRule, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentReplicationRule(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentReplicationTime(v *types.ReplicationTime, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.Status) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Status",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Status))
+	}
+	if v.Time != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Time",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentReplicationTimeValue(v.Time, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentReplicationTimeValue(v *types.ReplicationTimeValue, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Minutes != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Minutes",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.Minutes)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentRequestPaymentConfiguration(v *types.RequestPaymentConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.Payer) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Payer",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Payer))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentRequestProgress(v *types.RequestProgress, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Enabled != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Enabled",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Boolean(*v.Enabled)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentRestoreRequest(v *types.RestoreRequest, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Days != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Days",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.Days)
+	}
+	if v.Description != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Description",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Description)
+	}
+	if v.GlacierJobParameters != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "GlacierJobParameters",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentGlacierJobParameters(v.GlacierJobParameters, el); err != nil {
+			return err
+		}
+	}
+	if v.OutputLocation != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "OutputLocation",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentOutputLocation(v.OutputLocation, el); err != nil {
+			return err
+		}
+	}
+	if v.SelectParameters != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "SelectParameters",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentSelectParameters(v.SelectParameters, el); err != nil {
+			return err
+		}
+	}
+	if len(v.Tier) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Tier",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Tier))
+	}
+	if len(v.Type) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Type",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Type))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentRoutingRule(v *types.RoutingRule, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Condition != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Condition",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentCondition(v.Condition, el); err != nil {
+			return err
+		}
+	}
+	if v.Redirect != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Redirect",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentRedirect(v.Redirect, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentRoutingRules(v []types.RoutingRule, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	customMemberNameAttr := []smithyxml.Attr{}
+	customMemberName := smithyxml.StartElement{
+		Name: smithyxml.Name{
+			Local: "RoutingRule",
+		},
+		Attr: customMemberNameAttr,
+	}
+	array = value.ArrayWithCustomName(customMemberName)
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentRoutingRule(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentS3KeyFilter(v *types.S3KeyFilter, value smithyxml.Value) error {
+	defer value.Close()
+	if v.FilterRules != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "FilterRule",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentFilterRuleList(v.FilterRules, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentS3Location(v *types.S3Location, value smithyxml.Value) error {
+	defer value.Close()
+	if v.AccessControlList != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "AccessControlList",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentGrants(v.AccessControlList, el); err != nil {
+			return err
+		}
+	}
+	if v.BucketName != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "BucketName",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.BucketName)
+	}
+	if len(v.CannedACL) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "CannedACL",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.CannedACL))
+	}
+	if v.Encryption != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Encryption",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentEncryption(v.Encryption, el); err != nil {
+			return err
+		}
+	}
+	if v.Prefix != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Prefix",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Prefix)
+	}
+	if len(v.StorageClass) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "StorageClass",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.StorageClass))
+	}
+	if v.Tagging != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Tagging",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentTagging(v.Tagging, el); err != nil {
+			return err
+		}
+	}
+	if v.UserMetadata != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "UserMetadata",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentUserMetadata(v.UserMetadata, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.Value) error {
+	defer value.Close()
+	if v.End != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "End",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Long(*v.End)
+	}
+	if v.Start != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Start",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Long(*v.Start)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentSelectParameters(v *types.SelectParameters, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Expression != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Expression",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Expression)
+	}
+	if len(v.ExpressionType) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ExpressionType",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.ExpressionType))
+	}
+	if v.InputSerialization != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "InputSerialization",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentInputSerialization(v.InputSerialization, el); err != nil {
+			return err
+		}
+	}
+	if v.OutputSerialization != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "OutputSerialization",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentOutputSerialization(v.OutputSerialization, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault, value smithyxml.Value) error {
+	defer value.Close()
+	if v.KMSMasterKeyID != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "KMSMasterKeyID",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.KMSMasterKeyID)
+	}
+	if len(v.SSEAlgorithm) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "SSEAlgorithm",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.SSEAlgorithm))
+	}
+	return nil
+}
+
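`ServerSideEncryptionByDefault` is what callers populate to turn on bucket default encryption. A minimal sketch using SSE-S3 (AES256); the helper name is hypothetical, and a KMS key could be supplied instead via `KMSMasterKeyID`:

```go
package mapts3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableDefaultEncryption sets AES256 default encryption; the single rule is
// serialized as a flattened <Rule> element by the configuration serializer.
func enableDefaultEncryption(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String(bucket),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
					SSEAlgorithm: types.ServerSideEncryptionAes256,
				},
			}},
		},
	})
	return err
}
```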
+func awsRestxml_serializeDocumentServerSideEncryptionConfiguration(v *types.ServerSideEncryptionConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Rules != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Rule",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentServerSideEncryptionRules(v.Rules, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentServerSideEncryptionRule(v *types.ServerSideEncryptionRule, value smithyxml.Value) error {
+	defer value.Close()
+	if v.ApplyServerSideEncryptionByDefault != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ApplyServerSideEncryptionByDefault",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault, el); err != nil {
+			return err
+		}
+	}
+	if v.BucketKeyEnabled != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "BucketKeyEnabled",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Boolean(*v.BucketKeyEnabled)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentServerSideEncryptionRules(v []types.ServerSideEncryptionRule, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentServerSideEncryptionRule(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentSimplePrefix(v *types.SimplePrefix, value smithyxml.Value) error {
+	defer value.Close()
+	return nil
+}
+
+func awsRestxml_serializeDocumentSourceSelectionCriteria(v *types.SourceSelectionCriteria, value smithyxml.Value) error {
+	defer value.Close()
+	if v.ReplicaModifications != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ReplicaModifications",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentReplicaModifications(v.ReplicaModifications, el); err != nil {
+			return err
+		}
+	}
+	if v.SseKmsEncryptedObjects != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "SseKmsEncryptedObjects",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentSseKmsEncryptedObjects(v.SseKmsEncryptedObjects, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentSSEKMS(v *types.SSEKMS, value smithyxml.Value) error {
+	defer value.Close()
+	if v.KeyId != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "KeyId",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.KeyId)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.Status) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Status",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Status))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentSSES3(v *types.SSES3, value smithyxml.Value) error {
+	defer value.Close()
+	return nil
+}
+
+func awsRestxml_serializeDocumentStorageClassAnalysis(v *types.StorageClassAnalysis, value smithyxml.Value) error {
+	defer value.Close()
+	if v.DataExport != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "DataExport",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v.DataExport, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Destination != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Destination",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentAnalyticsExportDestination(v.Destination, el); err != nil {
+			return err
+		}
+	}
+	if len(v.OutputSchemaVersion) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "OutputSchemaVersion",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.OutputSchemaVersion))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentTag(v *types.Tag, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Key != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Key",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Key)
+	}
+	if v.Value != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Value",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Value)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentTagging(v *types.Tagging, value smithyxml.Value) error {
+	defer value.Close()
+	if v.TagSet != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "TagSet",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentTagSet(v.TagSet, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentTagSet(v []types.Tag, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	customMemberNameAttr := []smithyxml.Attr{}
+	customMemberName := smithyxml.StartElement{
+		Name: smithyxml.Name{
+			Local: "Tag",
+		},
+		Attr: customMemberNameAttr,
+	}
+	array = value.ArrayWithCustomName(customMemberName)
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentTag(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentTargetGrant(v *types.TargetGrant, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Grantee != nil {
+		rootAttr := []smithyxml.Attr{}
+		rootAttr = append(rootAttr, smithyxml.NewNamespaceAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance"))
+		if len(v.Grantee.Type) > 0 {
+			var av string
+			av = string(v.Grantee.Type)
+			rootAttr = append(rootAttr, smithyxml.NewAttribute("xsi:type", av))
+		}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Grantee",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentGrantee(v.Grantee, el); err != nil {
+			return err
+		}
+	}
+	if len(v.Permission) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Permission",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Permission))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentTargetGrants(v []types.TargetGrant, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	customMemberNameAttr := []smithyxml.Attr{}
+	customMemberName := smithyxml.StartElement{
+		Name: smithyxml.Name{
+			Local: "Grant",
+		},
+		Attr: customMemberNameAttr,
+	}
+	array = value.ArrayWithCustomName(customMemberName)
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentTargetGrant(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentTargetObjectKeyFormat(v *types.TargetObjectKeyFormat, value smithyxml.Value) error {
+	defer value.Close()
+	if v.PartitionedPrefix != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "PartitionedPrefix",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentPartitionedPrefix(v.PartitionedPrefix, el); err != nil {
+			return err
+		}
+	}
+	if v.SimplePrefix != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "SimplePrefix",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentSimplePrefix(v.SimplePrefix, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentTiering(v *types.Tiering, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.AccessTier) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "AccessTier",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.AccessTier))
+	}
+	if v.Days != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Days",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.Days)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentTieringList(v []types.Tiering, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentTiering(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentTopicConfiguration(v *types.TopicConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Events != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Event",
+			},
+			Attr: rootAttr,
+		}
+		el := value.FlattenedElement(root)
+		if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil {
+			return err
+		}
+	}
+	if v.Filter != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Filter",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil {
+			return err
+		}
+	}
+	if v.Id != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Id",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.Id)
+	}
+	if v.TopicArn != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Topic",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(*v.TopicArn)
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentTopicConfigurationList(v []types.TopicConfiguration, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentTopicConfiguration(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentTransition(v *types.Transition, value smithyxml.Value) error {
+	defer value.Close()
+	if v.Date != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Date",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(smithytime.FormatDateTime(*v.Date))
+	}
+	if v.Days != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Days",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.Integer(*v.Days)
+	}
+	if len(v.StorageClass) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "StorageClass",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.StorageClass))
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentTransitionList(v []types.Transition, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	array = value.Array()
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentTransition(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentUserMetadata(v []types.MetadataEntry, value smithyxml.Value) error {
+	var array *smithyxml.Array
+	if !value.IsFlattened() {
+		defer value.Close()
+	}
+	customMemberNameAttr := []smithyxml.Attr{}
+	customMemberName := smithyxml.StartElement{
+		Name: smithyxml.Name{
+			Local: "MetadataEntry",
+		},
+		Attr: customMemberNameAttr,
+	}
+	array = value.ArrayWithCustomName(customMemberName)
+	for i := range v {
+		am := array.Member()
+		if err := awsRestxml_serializeDocumentMetadataEntry(&v[i], am); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsRestxml_serializeDocumentVersioningConfiguration(v *types.VersioningConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if len(v.MFADelete) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "MfaDelete",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.MFADelete))
+	}
+	if len(v.Status) > 0 {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "Status",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		el.String(string(v.Status))
+	}
+	return nil
+}
+
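`VersioningConfiguration` (note the `MfaDelete` casing in the wire element) is typically the first thing enabled on a remote-state bucket so stale state can be recovered. A minimal sketch with a hypothetical helper name, using the `BucketVersioningStatusEnabled` constant defined in the enums file below:

```go
package mapts3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableVersioning turns on object versioning for a bucket.
func enableVersioning(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
		Bucket: aws.String(bucket),
		VersioningConfiguration: &types.VersioningConfiguration{
			Status: types.BucketVersioningStatusEnabled,
		},
	})
	return err
}
```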
+func awsRestxml_serializeDocumentWebsiteConfiguration(v *types.WebsiteConfiguration, value smithyxml.Value) error {
+	defer value.Close()
+	if v.ErrorDocument != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "ErrorDocument",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentErrorDocument(v.ErrorDocument, el); err != nil {
+			return err
+		}
+	}
+	if v.IndexDocument != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "IndexDocument",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentIndexDocument(v.IndexDocument, el); err != nil {
+			return err
+		}
+	}
+	if v.RedirectAllRequestsTo != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "RedirectAllRequestsTo",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentRedirectAllRequestsTo(v.RedirectAllRequestsTo, el); err != nil {
+			return err
+		}
+	}
+	if v.RoutingRules != nil {
+		rootAttr := []smithyxml.Attr{}
+		root := smithyxml.StartElement{
+			Name: smithyxml.Name{
+				Local: "RoutingRules",
+			},
+			Attr: rootAttr,
+		}
+		el := value.MemberElement(root)
+		if err := awsRestxml_serializeDocumentRoutingRules(v.RoutingRules, el); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go
new file mode 100644
index 000000000..ea3b9c82a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go
@@ -0,0 +1,1355 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+type AnalyticsS3ExportFileFormat string
+
+// Enum values for AnalyticsS3ExportFileFormat
+const (
+	AnalyticsS3ExportFileFormatCsv AnalyticsS3ExportFileFormat = "CSV"
+)
+
+// Values returns all known values for AnalyticsS3ExportFileFormat. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (AnalyticsS3ExportFileFormat) Values() []AnalyticsS3ExportFileFormat {
+	return []AnalyticsS3ExportFileFormat{
+		"CSV",
+	}
+}
+
+type ArchiveStatus string
+
+// Enum values for ArchiveStatus
+const (
+	ArchiveStatusArchiveAccess     ArchiveStatus = "ARCHIVE_ACCESS"
+	ArchiveStatusDeepArchiveAccess ArchiveStatus = "DEEP_ARCHIVE_ACCESS"
+)
+
+// Values returns all known values for ArchiveStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ArchiveStatus) Values() []ArchiveStatus {
+	return []ArchiveStatus{
+		"ARCHIVE_ACCESS",
+		"DEEP_ARCHIVE_ACCESS",
+	}
+}
+
+type BucketAccelerateStatus string
+
+// Enum values for BucketAccelerateStatus
+const (
+	BucketAccelerateStatusEnabled   BucketAccelerateStatus = "Enabled"
+	BucketAccelerateStatusSuspended BucketAccelerateStatus = "Suspended"
+)
+
+// Values returns all known values for BucketAccelerateStatus. Note that this can
+// be expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (BucketAccelerateStatus) Values() []BucketAccelerateStatus {
+	return []BucketAccelerateStatus{
+		"Enabled",
+		"Suspended",
+	}
+}
+
+type BucketCannedACL string
+
+// Enum values for BucketCannedACL
+const (
+	BucketCannedACLPrivate           BucketCannedACL = "private"
+	BucketCannedACLPublicRead        BucketCannedACL = "public-read"
+	BucketCannedACLPublicReadWrite   BucketCannedACL = "public-read-write"
+	BucketCannedACLAuthenticatedRead BucketCannedACL = "authenticated-read"
+)
+
+// Values returns all known values for BucketCannedACL. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (BucketCannedACL) Values() []BucketCannedACL {
+	return []BucketCannedACL{
+		"private",
+		"public-read",
+		"public-read-write",
+		"authenticated-read",
+	}
+}
+
+type BucketLocationConstraint string
+
+// Enum values for BucketLocationConstraint
+const (
+	BucketLocationConstraintAfSouth1     BucketLocationConstraint = "af-south-1"
+	BucketLocationConstraintApEast1      BucketLocationConstraint = "ap-east-1"
+	BucketLocationConstraintApNortheast1 BucketLocationConstraint = "ap-northeast-1"
+	BucketLocationConstraintApNortheast2 BucketLocationConstraint = "ap-northeast-2"
+	BucketLocationConstraintApNortheast3 BucketLocationConstraint = "ap-northeast-3"
+	BucketLocationConstraintApSouth1     BucketLocationConstraint = "ap-south-1"
+	BucketLocationConstraintApSouth2     BucketLocationConstraint = "ap-south-2"
+	BucketLocationConstraintApSoutheast1 BucketLocationConstraint = "ap-southeast-1"
+	BucketLocationConstraintApSoutheast2 BucketLocationConstraint = "ap-southeast-2"
+	BucketLocationConstraintApSoutheast3 BucketLocationConstraint = "ap-southeast-3"
+	BucketLocationConstraintCaCentral1   BucketLocationConstraint = "ca-central-1"
+	BucketLocationConstraintCnNorth1     BucketLocationConstraint = "cn-north-1"
+	BucketLocationConstraintCnNorthwest1 BucketLocationConstraint = "cn-northwest-1"
+	BucketLocationConstraintEu           BucketLocationConstraint = "EU"
+	BucketLocationConstraintEuCentral1   BucketLocationConstraint = "eu-central-1"
+	BucketLocationConstraintEuNorth1     BucketLocationConstraint = "eu-north-1"
+	BucketLocationConstraintEuSouth1     BucketLocationConstraint = "eu-south-1"
+	BucketLocationConstraintEuSouth2     BucketLocationConstraint = "eu-south-2"
+	BucketLocationConstraintEuWest1      BucketLocationConstraint = "eu-west-1"
+	BucketLocationConstraintEuWest2      BucketLocationConstraint = "eu-west-2"
+	BucketLocationConstraintEuWest3      BucketLocationConstraint = "eu-west-3"
+	BucketLocationConstraintMeSouth1     BucketLocationConstraint = "me-south-1"
+	BucketLocationConstraintSaEast1      BucketLocationConstraint = "sa-east-1"
+	BucketLocationConstraintUsEast2      BucketLocationConstraint = "us-east-2"
+	BucketLocationConstraintUsGovEast1   BucketLocationConstraint = "us-gov-east-1"
+	BucketLocationConstraintUsGovWest1   BucketLocationConstraint = "us-gov-west-1"
+	BucketLocationConstraintUsWest1      BucketLocationConstraint = "us-west-1"
+	BucketLocationConstraintUsWest2      BucketLocationConstraint = "us-west-2"
+)
+
+// Values returns all known values for BucketLocationConstraint. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (BucketLocationConstraint) Values() []BucketLocationConstraint {
+	return []BucketLocationConstraint{
+		"af-south-1",
+		"ap-east-1",
+		"ap-northeast-1",
+		"ap-northeast-2",
+		"ap-northeast-3",
+		"ap-south-1",
+		"ap-south-2",
+		"ap-southeast-1",
+		"ap-southeast-2",
+		"ap-southeast-3",
+		"ca-central-1",
+		"cn-north-1",
+		"cn-northwest-1",
+		"EU",
+		"eu-central-1",
+		"eu-north-1",
+		"eu-south-1",
+		"eu-south-2",
+		"eu-west-1",
+		"eu-west-2",
+		"eu-west-3",
+		"me-south-1",
+		"sa-east-1",
+		"us-east-2",
+		"us-gov-east-1",
+		"us-gov-west-1",
+		"us-west-1",
+		"us-west-2",
+	}
+}
+
+type BucketLogsPermission string
+
+// Enum values for BucketLogsPermission
+const (
+	BucketLogsPermissionFullControl BucketLogsPermission = "FULL_CONTROL"
+	BucketLogsPermissionRead        BucketLogsPermission = "READ"
+	BucketLogsPermissionWrite       BucketLogsPermission = "WRITE"
+)
+
+// Values returns all known values for BucketLogsPermission. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (BucketLogsPermission) Values() []BucketLogsPermission {
+	return []BucketLogsPermission{
+		"FULL_CONTROL",
+		"READ",
+		"WRITE",
+	}
+}
+
+type BucketType string
+
+// Enum values for BucketType
+const (
+	BucketTypeDirectory BucketType = "Directory"
+)
+
+// Values returns all known values for BucketType. Note that this can be expanded
+// in the future, and so it is only as up to date as the client. The ordering of
+// this slice is not guaranteed to be stable across updates.
+func (BucketType) Values() []BucketType {
+	return []BucketType{
+		"Directory",
+	}
+}
+
+type BucketVersioningStatus string
+
+// Enum values for BucketVersioningStatus
+const (
+	BucketVersioningStatusEnabled   BucketVersioningStatus = "Enabled"
+	BucketVersioningStatusSuspended BucketVersioningStatus = "Suspended"
+)
+
+// Values returns all known values for BucketVersioningStatus. Note that this can
+// be expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (BucketVersioningStatus) Values() []BucketVersioningStatus {
+	return []BucketVersioningStatus{
+		"Enabled",
+		"Suspended",
+	}
+}
+
+type ChecksumAlgorithm string
+
+// Enum values for ChecksumAlgorithm
+const (
+	ChecksumAlgorithmCrc32  ChecksumAlgorithm = "CRC32"
+	ChecksumAlgorithmCrc32c ChecksumAlgorithm = "CRC32C"
+	ChecksumAlgorithmSha1   ChecksumAlgorithm = "SHA1"
+	ChecksumAlgorithmSha256 ChecksumAlgorithm = "SHA256"
+)
+
+// Values returns all known values for ChecksumAlgorithm. Note that this can be
+// expanded in the future, and so it is only as up to date as the client. The
+// ordering of this slice is not guaranteed to be stable across updates.
+func (ChecksumAlgorithm) Values() []ChecksumAlgorithm {
+	return []ChecksumAlgorithm{
+		"CRC32",
+		"CRC32C",
+		"SHA1",
+		"SHA256",
+	}
+}
+
+func (ChecksumMode) Values() []ChecksumMode { + return []ChecksumMode{ + "ENABLED", + } +} + +type CompressionType string + +// Enum values for CompressionType +const ( + CompressionTypeNone CompressionType = "NONE" + CompressionTypeGzip CompressionType = "GZIP" + CompressionTypeBzip2 CompressionType = "BZIP2" +) + +// Values returns all known values for CompressionType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (CompressionType) Values() []CompressionType { + return []CompressionType{ + "NONE", + "GZIP", + "BZIP2", + } +} + +type DataRedundancy string + +// Enum values for DataRedundancy +const ( + DataRedundancySingleAvailabilityZone DataRedundancy = "SingleAvailabilityZone" +) + +// Values returns all known values for DataRedundancy. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (DataRedundancy) Values() []DataRedundancy { + return []DataRedundancy{ + "SingleAvailabilityZone", + } +} + +type DeleteMarkerReplicationStatus string + +// Enum values for DeleteMarkerReplicationStatus +const ( + DeleteMarkerReplicationStatusEnabled DeleteMarkerReplicationStatus = "Enabled" + DeleteMarkerReplicationStatusDisabled DeleteMarkerReplicationStatus = "Disabled" +) + +// Values returns all known values for DeleteMarkerReplicationStatus. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (DeleteMarkerReplicationStatus) Values() []DeleteMarkerReplicationStatus { + return []DeleteMarkerReplicationStatus{ + "Enabled", + "Disabled", + } +} + +type EncodingType string + +// Enum values for EncodingType +const ( + EncodingTypeUrl EncodingType = "url" +) + +// Values returns all known values for EncodingType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (EncodingType) Values() []EncodingType { + return []EncodingType{ + "url", + } +} + +type Event string + +// Enum values for Event +const ( + EventS3ReducedRedundancyLostObject Event = "s3:ReducedRedundancyLostObject" + EventS3ObjectCreated Event = "s3:ObjectCreated:*" + EventS3ObjectCreatedPut Event = "s3:ObjectCreated:Put" + EventS3ObjectCreatedPost Event = "s3:ObjectCreated:Post" + EventS3ObjectCreatedCopy Event = "s3:ObjectCreated:Copy" + EventS3ObjectCreatedCompleteMultipartUpload Event = "s3:ObjectCreated:CompleteMultipartUpload" + EventS3ObjectRemoved Event = "s3:ObjectRemoved:*" + EventS3ObjectRemovedDelete Event = "s3:ObjectRemoved:Delete" + EventS3ObjectRemovedDeleteMarkerCreated Event = "s3:ObjectRemoved:DeleteMarkerCreated" + EventS3ObjectRestore Event = "s3:ObjectRestore:*" + EventS3ObjectRestorePost Event = "s3:ObjectRestore:Post" + EventS3ObjectRestoreCompleted Event = "s3:ObjectRestore:Completed" + EventS3Replication Event = "s3:Replication:*" + EventS3ReplicationOperationFailedReplication Event = "s3:Replication:OperationFailedReplication" + EventS3ReplicationOperationNotTracked Event = "s3:Replication:OperationNotTracked" + EventS3ReplicationOperationMissedThreshold Event = "s3:Replication:OperationMissedThreshold" + EventS3ReplicationOperationReplicatedAfterThreshold Event = "s3:Replication:OperationReplicatedAfterThreshold" + EventS3ObjectRestoreDelete Event = "s3:ObjectRestore:Delete" + EventS3LifecycleTransition Event = "s3:LifecycleTransition" + EventS3IntelligentTiering Event = "s3:IntelligentTiering" + EventS3ObjectAclPut Event = "s3:ObjectAcl:Put" + EventS3LifecycleExpiration Event = "s3:LifecycleExpiration:*" + EventS3LifecycleExpirationDelete Event = "s3:LifecycleExpiration:Delete" + EventS3LifecycleExpirationDeleteMarkerCreated Event = "s3:LifecycleExpiration:DeleteMarkerCreated" + EventS3ObjectTagging Event = "s3:ObjectTagging:*" + EventS3ObjectTaggingPut Event = "s3:ObjectTagging:Put" + EventS3ObjectTaggingDelete Event = "s3:ObjectTagging:Delete" +) + +// Values returns all known values for Event. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Event) Values() []Event { + return []Event{ + "s3:ReducedRedundancyLostObject", + "s3:ObjectCreated:*", + "s3:ObjectCreated:Put", + "s3:ObjectCreated:Post", + "s3:ObjectCreated:Copy", + "s3:ObjectCreated:CompleteMultipartUpload", + "s3:ObjectRemoved:*", + "s3:ObjectRemoved:Delete", + "s3:ObjectRemoved:DeleteMarkerCreated", + "s3:ObjectRestore:*", + "s3:ObjectRestore:Post", + "s3:ObjectRestore:Completed", + "s3:Replication:*", + "s3:Replication:OperationFailedReplication", + "s3:Replication:OperationNotTracked", + "s3:Replication:OperationMissedThreshold", + "s3:Replication:OperationReplicatedAfterThreshold", + "s3:ObjectRestore:Delete", + "s3:LifecycleTransition", + "s3:IntelligentTiering", + "s3:ObjectAcl:Put", + "s3:LifecycleExpiration:*", + "s3:LifecycleExpiration:Delete", + "s3:LifecycleExpiration:DeleteMarkerCreated", + "s3:ObjectTagging:*", + "s3:ObjectTagging:Put", + "s3:ObjectTagging:Delete", + } +} + +type ExistingObjectReplicationStatus string + +// Enum values for ExistingObjectReplicationStatus +const ( + ExistingObjectReplicationStatusEnabled ExistingObjectReplicationStatus = "Enabled" + ExistingObjectReplicationStatusDisabled ExistingObjectReplicationStatus = "Disabled" +) + +// Values returns all known values for ExistingObjectReplicationStatus. 
Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (ExistingObjectReplicationStatus) Values() []ExistingObjectReplicationStatus { + return []ExistingObjectReplicationStatus{ + "Enabled", + "Disabled", + } +} + +type ExpirationStatus string + +// Enum values for ExpirationStatus +const ( + ExpirationStatusEnabled ExpirationStatus = "Enabled" + ExpirationStatusDisabled ExpirationStatus = "Disabled" +) + +// Values returns all known values for ExpirationStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ExpirationStatus) Values() []ExpirationStatus { + return []ExpirationStatus{ + "Enabled", + "Disabled", + } +} + +type ExpressionType string + +// Enum values for ExpressionType +const ( + ExpressionTypeSql ExpressionType = "SQL" +) + +// Values returns all known values for ExpressionType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ExpressionType) Values() []ExpressionType { + return []ExpressionType{ + "SQL", + } +} + +type FileHeaderInfo string + +// Enum values for FileHeaderInfo +const ( + FileHeaderInfoUse FileHeaderInfo = "USE" + FileHeaderInfoIgnore FileHeaderInfo = "IGNORE" + FileHeaderInfoNone FileHeaderInfo = "NONE" +) + +// Values returns all known values for FileHeaderInfo. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (FileHeaderInfo) Values() []FileHeaderInfo { + return []FileHeaderInfo{ + "USE", + "IGNORE", + "NONE", + } +} + +type FilterRuleName string + +// Enum values for FilterRuleName +const ( + FilterRuleNamePrefix FilterRuleName = "prefix" + FilterRuleNameSuffix FilterRuleName = "suffix" +) + +// Values returns all known values for FilterRuleName. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (FilterRuleName) Values() []FilterRuleName { + return []FilterRuleName{ + "prefix", + "suffix", + } +} + +type IntelligentTieringAccessTier string + +// Enum values for IntelligentTieringAccessTier +const ( + IntelligentTieringAccessTierArchiveAccess IntelligentTieringAccessTier = "ARCHIVE_ACCESS" + IntelligentTieringAccessTierDeepArchiveAccess IntelligentTieringAccessTier = "DEEP_ARCHIVE_ACCESS" +) + +// Values returns all known values for IntelligentTieringAccessTier. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (IntelligentTieringAccessTier) Values() []IntelligentTieringAccessTier { + return []IntelligentTieringAccessTier{ + "ARCHIVE_ACCESS", + "DEEP_ARCHIVE_ACCESS", + } +} + +type IntelligentTieringStatus string + +// Enum values for IntelligentTieringStatus +const ( + IntelligentTieringStatusEnabled IntelligentTieringStatus = "Enabled" + IntelligentTieringStatusDisabled IntelligentTieringStatus = "Disabled" +) + +// Values returns all known values for IntelligentTieringStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. 
+// The ordering of this slice is not guaranteed to be stable across updates. +func (IntelligentTieringStatus) Values() []IntelligentTieringStatus { + return []IntelligentTieringStatus{ + "Enabled", + "Disabled", + } +} + +type InventoryFormat string + +// Enum values for InventoryFormat +const ( + InventoryFormatCsv InventoryFormat = "CSV" + InventoryFormatOrc InventoryFormat = "ORC" + InventoryFormatParquet InventoryFormat = "Parquet" +) + +// Values returns all known values for InventoryFormat. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (InventoryFormat) Values() []InventoryFormat { + return []InventoryFormat{ + "CSV", + "ORC", + "Parquet", + } +} + +type InventoryFrequency string + +// Enum values for InventoryFrequency +const ( + InventoryFrequencyDaily InventoryFrequency = "Daily" + InventoryFrequencyWeekly InventoryFrequency = "Weekly" +) + +// Values returns all known values for InventoryFrequency. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (InventoryFrequency) Values() []InventoryFrequency { + return []InventoryFrequency{ + "Daily", + "Weekly", + } +} + +type InventoryIncludedObjectVersions string + +// Enum values for InventoryIncludedObjectVersions +const ( + InventoryIncludedObjectVersionsAll InventoryIncludedObjectVersions = "All" + InventoryIncludedObjectVersionsCurrent InventoryIncludedObjectVersions = "Current" +) + +// Values returns all known values for InventoryIncludedObjectVersions. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (InventoryIncludedObjectVersions) Values() []InventoryIncludedObjectVersions { + return []InventoryIncludedObjectVersions{ + "All", + "Current", + } +} + +type InventoryOptionalField string + +// Enum values for InventoryOptionalField +const ( + InventoryOptionalFieldSize InventoryOptionalField = "Size" + InventoryOptionalFieldLastModifiedDate InventoryOptionalField = "LastModifiedDate" + InventoryOptionalFieldStorageClass InventoryOptionalField = "StorageClass" + InventoryOptionalFieldETag InventoryOptionalField = "ETag" + InventoryOptionalFieldIsMultipartUploaded InventoryOptionalField = "IsMultipartUploaded" + InventoryOptionalFieldReplicationStatus InventoryOptionalField = "ReplicationStatus" + InventoryOptionalFieldEncryptionStatus InventoryOptionalField = "EncryptionStatus" + InventoryOptionalFieldObjectLockRetainUntilDate InventoryOptionalField = "ObjectLockRetainUntilDate" + InventoryOptionalFieldObjectLockMode InventoryOptionalField = "ObjectLockMode" + InventoryOptionalFieldObjectLockLegalHoldStatus InventoryOptionalField = "ObjectLockLegalHoldStatus" + InventoryOptionalFieldIntelligentTieringAccessTier InventoryOptionalField = "IntelligentTieringAccessTier" + InventoryOptionalFieldBucketKeyStatus InventoryOptionalField = "BucketKeyStatus" + InventoryOptionalFieldChecksumAlgorithm InventoryOptionalField = "ChecksumAlgorithm" + InventoryOptionalFieldObjectAccessControlList InventoryOptionalField = "ObjectAccessControlList" + InventoryOptionalFieldObjectOwner InventoryOptionalField = "ObjectOwner" +) + +// Values returns all known values for InventoryOptionalField. 
Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (InventoryOptionalField) Values() []InventoryOptionalField { + return []InventoryOptionalField{ + "Size", + "LastModifiedDate", + "StorageClass", + "ETag", + "IsMultipartUploaded", + "ReplicationStatus", + "EncryptionStatus", + "ObjectLockRetainUntilDate", + "ObjectLockMode", + "ObjectLockLegalHoldStatus", + "IntelligentTieringAccessTier", + "BucketKeyStatus", + "ChecksumAlgorithm", + "ObjectAccessControlList", + "ObjectOwner", + } +} + +type JSONType string + +// Enum values for JSONType +const ( + JSONTypeDocument JSONType = "DOCUMENT" + JSONTypeLines JSONType = "LINES" +) + +// Values returns all known values for JSONType. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (JSONType) Values() []JSONType { + return []JSONType{ + "DOCUMENT", + "LINES", + } +} + +type LocationType string + +// Enum values for LocationType +const ( + LocationTypeAvailabilityZone LocationType = "AvailabilityZone" +) + +// Values returns all known values for LocationType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (LocationType) Values() []LocationType { + return []LocationType{ + "AvailabilityZone", + } +} + +type MetadataDirective string + +// Enum values for MetadataDirective +const ( + MetadataDirectiveCopy MetadataDirective = "COPY" + MetadataDirectiveReplace MetadataDirective = "REPLACE" +) + +// Values returns all known values for MetadataDirective. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (MetadataDirective) Values() []MetadataDirective { + return []MetadataDirective{ + "COPY", + "REPLACE", + } +} + +type MetricsStatus string + +// Enum values for MetricsStatus +const ( + MetricsStatusEnabled MetricsStatus = "Enabled" + MetricsStatusDisabled MetricsStatus = "Disabled" +) + +// Values returns all known values for MetricsStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (MetricsStatus) Values() []MetricsStatus { + return []MetricsStatus{ + "Enabled", + "Disabled", + } +} + +type MFADelete string + +// Enum values for MFADelete +const ( + MFADeleteEnabled MFADelete = "Enabled" + MFADeleteDisabled MFADelete = "Disabled" +) + +// Values returns all known values for MFADelete. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (MFADelete) Values() []MFADelete { + return []MFADelete{ + "Enabled", + "Disabled", + } +} + +type MFADeleteStatus string + +// Enum values for MFADeleteStatus +const ( + MFADeleteStatusEnabled MFADeleteStatus = "Enabled" + MFADeleteStatusDisabled MFADeleteStatus = "Disabled" +) + +// Values returns all known values for MFADeleteStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (MFADeleteStatus) Values() []MFADeleteStatus { + return []MFADeleteStatus{ + "Enabled", + "Disabled", + } +} + +type ObjectAttributes string + +// Enum values for ObjectAttributes +const ( + ObjectAttributesEtag ObjectAttributes = "ETag" + ObjectAttributesChecksum ObjectAttributes = "Checksum" + ObjectAttributesObjectParts ObjectAttributes = "ObjectParts" + ObjectAttributesStorageClass ObjectAttributes = "StorageClass" + ObjectAttributesObjectSize ObjectAttributes = "ObjectSize" +) + +// Values returns all known values for ObjectAttributes. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectAttributes) Values() []ObjectAttributes { + return []ObjectAttributes{ + "ETag", + "Checksum", + "ObjectParts", + "StorageClass", + "ObjectSize", + } +} + +type ObjectCannedACL string + +// Enum values for ObjectCannedACL +const ( + ObjectCannedACLPrivate ObjectCannedACL = "private" + ObjectCannedACLPublicRead ObjectCannedACL = "public-read" + ObjectCannedACLPublicReadWrite ObjectCannedACL = "public-read-write" + ObjectCannedACLAuthenticatedRead ObjectCannedACL = "authenticated-read" + ObjectCannedACLAwsExecRead ObjectCannedACL = "aws-exec-read" + ObjectCannedACLBucketOwnerRead ObjectCannedACL = "bucket-owner-read" + ObjectCannedACLBucketOwnerFullControl ObjectCannedACL = "bucket-owner-full-control" +) + +// Values returns all known values for ObjectCannedACL. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectCannedACL) Values() []ObjectCannedACL { + return []ObjectCannedACL{ + "private", + "public-read", + "public-read-write", + "authenticated-read", + "aws-exec-read", + "bucket-owner-read", + "bucket-owner-full-control", + } +} + +type ObjectLockEnabled string + +// Enum values for ObjectLockEnabled +const ( + ObjectLockEnabledEnabled ObjectLockEnabled = "Enabled" +) + +// Values returns all known values for ObjectLockEnabled. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectLockEnabled) Values() []ObjectLockEnabled { + return []ObjectLockEnabled{ + "Enabled", + } +} + +type ObjectLockLegalHoldStatus string + +// Enum values for ObjectLockLegalHoldStatus +const ( + ObjectLockLegalHoldStatusOn ObjectLockLegalHoldStatus = "ON" + ObjectLockLegalHoldStatusOff ObjectLockLegalHoldStatus = "OFF" +) + +// Values returns all known values for ObjectLockLegalHoldStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (ObjectLockLegalHoldStatus) Values() []ObjectLockLegalHoldStatus { + return []ObjectLockLegalHoldStatus{ + "ON", + "OFF", + } +} + +type ObjectLockMode string + +// Enum values for ObjectLockMode +const ( + ObjectLockModeGovernance ObjectLockMode = "GOVERNANCE" + ObjectLockModeCompliance ObjectLockMode = "COMPLIANCE" +) + +// Values returns all known values for ObjectLockMode. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (ObjectLockMode) Values() []ObjectLockMode { + return []ObjectLockMode{ + "GOVERNANCE", + "COMPLIANCE", + } +} + +type ObjectLockRetentionMode string + +// Enum values for ObjectLockRetentionMode +const ( + ObjectLockRetentionModeGovernance ObjectLockRetentionMode = "GOVERNANCE" + ObjectLockRetentionModeCompliance ObjectLockRetentionMode = "COMPLIANCE" +) + +// Values returns all known values for ObjectLockRetentionMode. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectLockRetentionMode) Values() []ObjectLockRetentionMode { + return []ObjectLockRetentionMode{ + "GOVERNANCE", + "COMPLIANCE", + } +} + +type ObjectOwnership string + +// Enum values for ObjectOwnership +const ( + ObjectOwnershipBucketOwnerPreferred ObjectOwnership = "BucketOwnerPreferred" + ObjectOwnershipObjectWriter ObjectOwnership = "ObjectWriter" + ObjectOwnershipBucketOwnerEnforced ObjectOwnership = "BucketOwnerEnforced" +) + +// Values returns all known values for ObjectOwnership. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectOwnership) Values() []ObjectOwnership { + return []ObjectOwnership{ + "BucketOwnerPreferred", + "ObjectWriter", + "BucketOwnerEnforced", + } +} + +type ObjectStorageClass string + +// Enum values for ObjectStorageClass +const ( + ObjectStorageClassStandard ObjectStorageClass = "STANDARD" + ObjectStorageClassReducedRedundancy ObjectStorageClass = "REDUCED_REDUNDANCY" + ObjectStorageClassGlacier ObjectStorageClass = "GLACIER" + ObjectStorageClassStandardIa ObjectStorageClass = "STANDARD_IA" + ObjectStorageClassOnezoneIa ObjectStorageClass = "ONEZONE_IA" + ObjectStorageClassIntelligentTiering ObjectStorageClass = "INTELLIGENT_TIERING" + ObjectStorageClassDeepArchive ObjectStorageClass = "DEEP_ARCHIVE" + ObjectStorageClassOutposts ObjectStorageClass = "OUTPOSTS" + ObjectStorageClassGlacierIr ObjectStorageClass = "GLACIER_IR" + ObjectStorageClassSnow ObjectStorageClass = "SNOW" + ObjectStorageClassExpressOnezone ObjectStorageClass = "EXPRESS_ONEZONE" +) + +// Values returns all known values for ObjectStorageClass. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectStorageClass) Values() []ObjectStorageClass { + return []ObjectStorageClass{ + "STANDARD", + "REDUCED_REDUNDANCY", + "GLACIER", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "DEEP_ARCHIVE", + "OUTPOSTS", + "GLACIER_IR", + "SNOW", + "EXPRESS_ONEZONE", + } +} + +type ObjectVersionStorageClass string + +// Enum values for ObjectVersionStorageClass +const ( + ObjectVersionStorageClassStandard ObjectVersionStorageClass = "STANDARD" +) + +// Values returns all known values for ObjectVersionStorageClass. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (ObjectVersionStorageClass) Values() []ObjectVersionStorageClass { + return []ObjectVersionStorageClass{ + "STANDARD", + } +} + +type OptionalObjectAttributes string + +// Enum values for OptionalObjectAttributes +const ( + OptionalObjectAttributesRestoreStatus OptionalObjectAttributes = "RestoreStatus" +) + +// Values returns all known values for OptionalObjectAttributes. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (OptionalObjectAttributes) Values() []OptionalObjectAttributes { + return []OptionalObjectAttributes{ + "RestoreStatus", + } +} + +type OwnerOverride string + +// Enum values for OwnerOverride +const ( + OwnerOverrideDestination OwnerOverride = "Destination" +) + +// Values returns all known values for OwnerOverride. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (OwnerOverride) Values() []OwnerOverride { + return []OwnerOverride{ + "Destination", + } +} + +type PartitionDateSource string + +// Enum values for PartitionDateSource +const ( + PartitionDateSourceEventTime PartitionDateSource = "EventTime" + PartitionDateSourceDeliveryTime PartitionDateSource = "DeliveryTime" +) + +// Values returns all known values for PartitionDateSource. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (PartitionDateSource) Values() []PartitionDateSource { + return []PartitionDateSource{ + "EventTime", + "DeliveryTime", + } +} + +type Payer string + +// Enum values for Payer +const ( + PayerRequester Payer = "Requester" + PayerBucketOwner Payer = "BucketOwner" +) + +// Values returns all known values for Payer. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Payer) Values() []Payer { + return []Payer{ + "Requester", + "BucketOwner", + } +} + +type Permission string + +// Enum values for Permission +const ( + PermissionFullControl Permission = "FULL_CONTROL" + PermissionWrite Permission = "WRITE" + PermissionWriteAcp Permission = "WRITE_ACP" + PermissionRead Permission = "READ" + PermissionReadAcp Permission = "READ_ACP" +) + +// Values returns all known values for Permission. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (Permission) Values() []Permission { + return []Permission{ + "FULL_CONTROL", + "WRITE", + "WRITE_ACP", + "READ", + "READ_ACP", + } +} + +type Protocol string + +// Enum values for Protocol +const ( + ProtocolHttp Protocol = "http" + ProtocolHttps Protocol = "https" +) + +// Values returns all known values for Protocol. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Protocol) Values() []Protocol { + return []Protocol{ + "http", + "https", + } +} + +type QuoteFields string + +// Enum values for QuoteFields +const ( + QuoteFieldsAlways QuoteFields = "ALWAYS" + QuoteFieldsAsneeded QuoteFields = "ASNEEDED" +) + +// Values returns all known values for QuoteFields. 
Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (QuoteFields) Values() []QuoteFields { + return []QuoteFields{ + "ALWAYS", + "ASNEEDED", + } +} + +type ReplicaModificationsStatus string + +// Enum values for ReplicaModificationsStatus +const ( + ReplicaModificationsStatusEnabled ReplicaModificationsStatus = "Enabled" + ReplicaModificationsStatusDisabled ReplicaModificationsStatus = "Disabled" +) + +// Values returns all known values for ReplicaModificationsStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (ReplicaModificationsStatus) Values() []ReplicaModificationsStatus { + return []ReplicaModificationsStatus{ + "Enabled", + "Disabled", + } +} + +type ReplicationRuleStatus string + +// Enum values for ReplicationRuleStatus +const ( + ReplicationRuleStatusEnabled ReplicationRuleStatus = "Enabled" + ReplicationRuleStatusDisabled ReplicationRuleStatus = "Disabled" +) + +// Values returns all known values for ReplicationRuleStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ReplicationRuleStatus) Values() []ReplicationRuleStatus { + return []ReplicationRuleStatus{ + "Enabled", + "Disabled", + } +} + +type ReplicationStatus string + +// Enum values for ReplicationStatus +const ( + ReplicationStatusComplete ReplicationStatus = "COMPLETE" + ReplicationStatusPending ReplicationStatus = "PENDING" + ReplicationStatusFailed ReplicationStatus = "FAILED" + ReplicationStatusReplica ReplicationStatus = "REPLICA" + ReplicationStatusCompleted ReplicationStatus = "COMPLETED" +) + +// Values returns all known values for ReplicationStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ReplicationStatus) Values() []ReplicationStatus { + return []ReplicationStatus{ + "COMPLETE", + "PENDING", + "FAILED", + "REPLICA", + "COMPLETED", + } +} + +type ReplicationTimeStatus string + +// Enum values for ReplicationTimeStatus +const ( + ReplicationTimeStatusEnabled ReplicationTimeStatus = "Enabled" + ReplicationTimeStatusDisabled ReplicationTimeStatus = "Disabled" +) + +// Values returns all known values for ReplicationTimeStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ReplicationTimeStatus) Values() []ReplicationTimeStatus { + return []ReplicationTimeStatus{ + "Enabled", + "Disabled", + } +} + +type RequestCharged string + +// Enum values for RequestCharged +const ( + RequestChargedRequester RequestCharged = "requester" +) + +// Values returns all known values for RequestCharged. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (RequestCharged) Values() []RequestCharged { + return []RequestCharged{ + "requester", + } +} + +type RequestPayer string + +// Enum values for RequestPayer +const ( + RequestPayerRequester RequestPayer = "requester" +) + +// Values returns all known values for RequestPayer. 
Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (RequestPayer) Values() []RequestPayer { + return []RequestPayer{ + "requester", + } +} + +type RestoreRequestType string + +// Enum values for RestoreRequestType +const ( + RestoreRequestTypeSelect RestoreRequestType = "SELECT" +) + +// Values returns all known values for RestoreRequestType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (RestoreRequestType) Values() []RestoreRequestType { + return []RestoreRequestType{ + "SELECT", + } +} + +type ServerSideEncryption string + +// Enum values for ServerSideEncryption +const ( + ServerSideEncryptionAes256 ServerSideEncryption = "AES256" + ServerSideEncryptionAwsKms ServerSideEncryption = "aws:kms" + ServerSideEncryptionAwsKmsDsse ServerSideEncryption = "aws:kms:dsse" +) + +// Values returns all known values for ServerSideEncryption. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ServerSideEncryption) Values() []ServerSideEncryption { + return []ServerSideEncryption{ + "AES256", + "aws:kms", + "aws:kms:dsse", + } +} + +type SessionMode string + +// Enum values for SessionMode +const ( + SessionModeReadOnly SessionMode = "ReadOnly" + SessionModeReadWrite SessionMode = "ReadWrite" +) + +// Values returns all known values for SessionMode. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (SessionMode) Values() []SessionMode { + return []SessionMode{ + "ReadOnly", + "ReadWrite", + } +} + +type SseKmsEncryptedObjectsStatus string + +// Enum values for SseKmsEncryptedObjectsStatus +const ( + SseKmsEncryptedObjectsStatusEnabled SseKmsEncryptedObjectsStatus = "Enabled" + SseKmsEncryptedObjectsStatusDisabled SseKmsEncryptedObjectsStatus = "Disabled" +) + +// Values returns all known values for SseKmsEncryptedObjectsStatus. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (SseKmsEncryptedObjectsStatus) Values() []SseKmsEncryptedObjectsStatus { + return []SseKmsEncryptedObjectsStatus{ + "Enabled", + "Disabled", + } +} + +type StorageClass string + +// Enum values for StorageClass +const ( + StorageClassStandard StorageClass = "STANDARD" + StorageClassReducedRedundancy StorageClass = "REDUCED_REDUNDANCY" + StorageClassStandardIa StorageClass = "STANDARD_IA" + StorageClassOnezoneIa StorageClass = "ONEZONE_IA" + StorageClassIntelligentTiering StorageClass = "INTELLIGENT_TIERING" + StorageClassGlacier StorageClass = "GLACIER" + StorageClassDeepArchive StorageClass = "DEEP_ARCHIVE" + StorageClassOutposts StorageClass = "OUTPOSTS" + StorageClassGlacierIr StorageClass = "GLACIER_IR" + StorageClassSnow StorageClass = "SNOW" + StorageClassExpressOnezone StorageClass = "EXPRESS_ONEZONE" +) + +// Values returns all known values for StorageClass. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (StorageClass) Values() []StorageClass { + return []StorageClass{ + "STANDARD", + "REDUCED_REDUNDANCY", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "GLACIER", + "DEEP_ARCHIVE", + "OUTPOSTS", + "GLACIER_IR", + "SNOW", + "EXPRESS_ONEZONE", + } +} + +type StorageClassAnalysisSchemaVersion string + +// Enum values for StorageClassAnalysisSchemaVersion +const ( + StorageClassAnalysisSchemaVersionV1 StorageClassAnalysisSchemaVersion = "V_1" +) + +// Values returns all known values for StorageClassAnalysisSchemaVersion. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (StorageClassAnalysisSchemaVersion) Values() []StorageClassAnalysisSchemaVersion { + return []StorageClassAnalysisSchemaVersion{ + "V_1", + } +} + +type TaggingDirective string + +// Enum values for TaggingDirective +const ( + TaggingDirectiveCopy TaggingDirective = "COPY" + TaggingDirectiveReplace TaggingDirective = "REPLACE" +) + +// Values returns all known values for TaggingDirective. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (TaggingDirective) Values() []TaggingDirective { + return []TaggingDirective{ + "COPY", + "REPLACE", + } +} + +type Tier string + +// Enum values for Tier +const ( + TierStandard Tier = "Standard" + TierBulk Tier = "Bulk" + TierExpedited Tier = "Expedited" +) + +// Values returns all known values for Tier. Note that this can be expanded in the +// future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Tier) Values() []Tier { + return []Tier{ + "Standard", + "Bulk", + "Expedited", + } +} + +type TransitionStorageClass string + +// Enum values for TransitionStorageClass +const ( + TransitionStorageClassGlacier TransitionStorageClass = "GLACIER" + TransitionStorageClassStandardIa TransitionStorageClass = "STANDARD_IA" + TransitionStorageClassOnezoneIa TransitionStorageClass = "ONEZONE_IA" + TransitionStorageClassIntelligentTiering TransitionStorageClass = "INTELLIGENT_TIERING" + TransitionStorageClassDeepArchive TransitionStorageClass = "DEEP_ARCHIVE" + TransitionStorageClassGlacierIr TransitionStorageClass = "GLACIER_IR" +) + +// Values returns all known values for TransitionStorageClass. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (TransitionStorageClass) Values() []TransitionStorageClass { + return []TransitionStorageClass{ + "GLACIER", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "DEEP_ARCHIVE", + "GLACIER_IR", + } +} + +type Type string + +// Enum values for Type +const ( + TypeCanonicalUser Type = "CanonicalUser" + TypeAmazonCustomerByEmail Type = "AmazonCustomerByEmail" + TypeGroup Type = "Group" +) + +// Values returns all known values for Type. Note that this can be expanded in the +// future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. 
+func (Type) Values() []Type { + return []Type{ + "CanonicalUser", + "AmazonCustomerByEmail", + "Group", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go new file mode 100644 index 000000000..166484f4e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go @@ -0,0 +1,258 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The requested bucket name is not available. The bucket namespace is shared by +// all users of the system. Select a different name and try again. +type BucketAlreadyExists struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *BucketAlreadyExists) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *BucketAlreadyExists) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *BucketAlreadyExists) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "BucketAlreadyExists" + } + return *e.ErrorCodeOverride +} +func (e *BucketAlreadyExists) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The bucket you tried to create already exists, and you own it. Amazon S3 +// returns this error in all Amazon Web Services Regions except in the North +// Virginia Region. For legacy compatibility, if you re-create an existing bucket +// that you already own in the North Virginia Region, Amazon S3 returns 200 OK and +// resets the bucket access control lists (ACLs). +type BucketAlreadyOwnedByYou struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *BucketAlreadyOwnedByYou) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *BucketAlreadyOwnedByYou) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *BucketAlreadyOwnedByYou) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "BucketAlreadyOwnedByYou" + } + return *e.ErrorCodeOverride +} +func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Object is archived and inaccessible until restored. If the object you are +// retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 +// Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access +// tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can +// retrieve the object you must first restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) +// . Otherwise, this operation returns an InvalidObjectState error. For +// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. 
+type InvalidObjectState struct { + Message *string + + ErrorCodeOverride *string + + StorageClass StorageClass + AccessTier IntelligentTieringAccessTier + + noSmithyDocumentSerde +} + +func (e *InvalidObjectState) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidObjectState) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidObjectState) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidObjectState" + } + return *e.ErrorCodeOverride +} +func (e *InvalidObjectState) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified bucket does not exist. +type NoSuchBucket struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *NoSuchBucket) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NoSuchBucket) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NoSuchBucket) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "NoSuchBucket" + } + return *e.ErrorCodeOverride +} +func (e *NoSuchBucket) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified key does not exist. +type NoSuchKey struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *NoSuchKey) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NoSuchKey) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NoSuchKey) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "NoSuchKey" + } + return *e.ErrorCodeOverride +} +func (e *NoSuchKey) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified multipart upload does not exist. +type NoSuchUpload struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *NoSuchUpload) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NoSuchUpload) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NoSuchUpload) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "NoSuchUpload" + } + return *e.ErrorCodeOverride +} +func (e *NoSuchUpload) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified content does not exist. +type NotFound struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *NotFound) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NotFound) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NotFound) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "NotFound" + } + return *e.ErrorCodeOverride +} +func (e *NotFound) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// This action is not allowed against this storage tier. 
+type ObjectAlreadyInActiveTierError struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ObjectAlreadyInActiveTierError) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ObjectAlreadyInActiveTierError) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ObjectAlreadyInActiveTierError) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ObjectAlreadyInActiveTierError" + } + return *e.ErrorCodeOverride +} +func (e *ObjectAlreadyInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The source object of the COPY action is not in the active tier and is only +// stored in Amazon S3 Glacier. +type ObjectNotInActiveTierError struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ObjectNotInActiveTierError) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ObjectNotInActiveTierError) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ObjectNotInActiveTierError) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ObjectNotInActiveTierError" + } + return *e.ErrorCodeOverride +} +func (e *ObjectNotInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go new file mode 100644 index 000000000..4299b57cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go @@ -0,0 +1,3528 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// Specifies the days since the initiation of an incomplete multipart upload that +// Amazon S3 will wait before permanently removing all parts of the upload. For +// more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// in the Amazon S3 User Guide. +type AbortIncompleteMultipartUpload struct { + + // Specifies the number of days after which Amazon S3 aborts an incomplete + // multipart upload. + DaysAfterInitiation *int32 + + noSmithyDocumentSerde +} + +// Configures the transfer acceleration state for an Amazon S3 bucket. For more +// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon S3 User Guide. +type AccelerateConfiguration struct { + + // Specifies the transfer acceleration status of the bucket. + Status BucketAccelerateStatus + + noSmithyDocumentSerde +} + +// Contains the elements that set the ACL permissions for an object per grantee. +type AccessControlPolicy struct { + + // A list of grants. + Grants []Grant + + // Container for the bucket owner's display name and ID. + Owner *Owner + + noSmithyDocumentSerde +} + +// A container for information about access control for replicas. +type AccessControlTranslation struct { + + // Specifies the replica ownership. For default and valid values, see PUT bucket + // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // in the Amazon S3 API Reference. + // + // This member is required. 
+ Owner OwnerOverride + + noSmithyDocumentSerde +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates in any +// combination, and an object must match all of the predicates for the filter to +// apply. +type AnalyticsAndOperator struct { + + // The prefix to use when evaluating an AND predicate: The prefix that an object + // must have to be included in the metrics results. + Prefix *string + + // The list of tags to use when evaluating an AND predicate. + Tags []Tag + + noSmithyDocumentSerde +} + +// Specifies the configuration and any analyses for the analytics filter of an +// Amazon S3 bucket. +type AnalyticsConfiguration struct { + + // The ID that identifies the analytics configuration. + // + // This member is required. + Id *string + + // Contains data related to access patterns to be collected and made available to + // analyze the tradeoffs between different storage classes. + // + // This member is required. + StorageClassAnalysis *StorageClassAnalysis + + // The filter used to describe a set of objects for analyses. A filter must have + // exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no + // filter is provided, all objects will be considered in any analysis. + Filter AnalyticsFilter + + noSmithyDocumentSerde +} + +// Where to publish the analytics results. +type AnalyticsExportDestination struct { + + // A destination signifying output to an S3 bucket. + // + // This member is required. + S3BucketDestination *AnalyticsS3BucketDestination + + noSmithyDocumentSerde +} + +// The filter used to describe a set of objects for analyses. A filter must have +// exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no +// filter is provided, all objects will be considered in any analysis. +// +// The following types satisfy this interface: +// +// AnalyticsFilterMemberAnd +// AnalyticsFilterMemberPrefix +// AnalyticsFilterMemberTag +type AnalyticsFilter interface { + isAnalyticsFilter() +} + +// A conjunction (logical AND) of predicates, which is used in evaluating an +// analytics filter. The operator must have at least two predicates. +type AnalyticsFilterMemberAnd struct { + Value AnalyticsAndOperator + + noSmithyDocumentSerde +} + +func (*AnalyticsFilterMemberAnd) isAnalyticsFilter() {} + +// The prefix to use when evaluating an analytics filter. +type AnalyticsFilterMemberPrefix struct { + Value string + + noSmithyDocumentSerde +} + +func (*AnalyticsFilterMemberPrefix) isAnalyticsFilter() {} + +// The tag to use when evaluating an analytics filter. +type AnalyticsFilterMemberTag struct { + Value Tag + + noSmithyDocumentSerde +} + +func (*AnalyticsFilterMemberTag) isAnalyticsFilter() {} + +// Contains information about where to publish the analytics results. +type AnalyticsS3BucketDestination struct { + + // The Amazon Resource Name (ARN) of the bucket to which data is exported. + // + // This member is required. + Bucket *string + + // Specifies the file format used when exporting data to Amazon S3. + // + // This member is required. + Format AnalyticsS3ExportFileFormat + + // The account ID that owns the destination S3 bucket. If no account ID is + // provided, the owner is not validated before exporting data. Although this value + // is optional, we strongly recommend that you set it to help prevent problems if + // the destination bucket ownership changes. + BucketAccountId *string + + // The prefix to use when exporting data. 
The prefix is prepended to all results. + Prefix *string + + noSmithyDocumentSerde +} + +// In terms of implementation, a Bucket is a resource. +type Bucket struct { + + // Date the bucket was created. This date can change when making changes to your + // bucket, such as editing its bucket policy. + CreationDate *time.Time + + // The name of the bucket. + Name *string + + noSmithyDocumentSerde +} + +// Specifies the information about the bucket that will be created. For more +// information about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. This functionality is only supported by directory +// buckets. +type BucketInfo struct { + + // The number of Availability Zone that's used for redundancy for the bucket. + DataRedundancy DataRedundancy + + // The type of bucket. + Type BucketType + + noSmithyDocumentSerde +} + +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For +// more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. +type BucketLifecycleConfiguration struct { + + // A lifecycle rule for individual objects in an Amazon S3 bucket. + // + // This member is required. + Rules []LifecycleRule + + noSmithyDocumentSerde +} + +// Container for logging status information. +type BucketLoggingStatus struct { + + // Describes where logs are stored and the prefix that Amazon S3 assigns to all + // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon S3 API Reference. + LoggingEnabled *LoggingEnabled + + noSmithyDocumentSerde +} + +// Contains all the possible checksum or digest values for an object. +type Checksum struct { + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. 
Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + noSmithyDocumentSerde +} + +// Container for all (if there are any) keys between Prefix and the next +// occurrence of the string specified by a delimiter. CommonPrefixes lists keys +// that act like subdirectories in the directory specified by Prefix. For example, +// if the prefix is notes/ and the delimiter is a slash (/) as in +// notes/summer/july, the common prefix is notes/summer/. +type CommonPrefix struct { + + // Container for the specified common prefix. + Prefix *string + + noSmithyDocumentSerde +} + +// The container for the completed multipart upload details. +type CompletedMultipartUpload struct { + + // Array of CompletedPart data types. If you do not supply a valid Part with your + // request, the service sends back an HTTP 400 response. + Parts []CompletedPart + + noSmithyDocumentSerde +} + +// Details of the parts that were uploaded. +type CompletedPart struct { + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. 
Instead, it's a calculation based on the
+ // checksum values of each individual part. For more information about how
+ // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA1 *string
+
+ // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ // present if it was uploaded with the object. When you use an API operation on an
+ // object that was uploaded using multipart uploads, this value may not be a direct
+ // checksum value of the full object. Instead, it's a calculation based on the
+ // checksum values of each individual part. For more information about how
+ // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string
+
+ // Part number that identifies the part. This is a positive integer between 1 and
+ // 10,000.
+ // - General purpose buckets - In CompleteMultipartUpload , when an additional
+ // checksum (including x-amz-checksum-crc32 , x-amz-checksum-crc32c ,
+ // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is applied to each part, the
+ // PartNumber must start at 1 and the part numbers must be consecutive.
+ // Otherwise, Amazon S3 generates an HTTP 400 Bad Request status code and an
+ // InvalidPartOrder error code.
+ // - Directory buckets - In CompleteMultipartUpload , the PartNumber must start
+ // at 1 and the part numbers must be consecutive.
+ PartNumber *int32
+
+ noSmithyDocumentSerde
+}
+
+// A container for describing a condition that must be met for the specified
+// redirect to apply. For example, 1. If request is for pages in the /docs folder,
+// redirect to the /documents folder. 2. If request results in HTTP error 4xx,
+// redirect request to another host where you might process the error.
+type Condition struct {
+
+ // The HTTP error code when the redirect is applied. In the event of an error, if
+ // the error code equals this value, then the specified redirect is applied.
+ // Required when parent element Condition is specified and sibling KeyPrefixEquals
+ // is not specified. If both are specified, then both must be true for the redirect
+ // to be applied.
+ HttpErrorCodeReturnedEquals *string
+
+ // The object key name prefix when the redirect is applied. For example, to
+ // redirect requests for ExamplePage.html , the key prefix will be ExamplePage.html
+ // . To redirect requests for all pages with the prefix docs/ , the key prefix will
+ // be /docs , which identifies all objects in the docs/ folder. Required when the
+ // parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+ // is not specified. If both conditions are specified, both must be true for the
+ // redirect to be applied. Replacement must be made for object keys containing
+ // special characters (such as carriage returns) when using XML requests. For more
+ // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
+ // .
+ KeyPrefixEquals *string
+
+ noSmithyDocumentSerde
+}
+
+type ContinuationEvent struct {
+ noSmithyDocumentSerde
+}
+
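+// exampleCompletedUpload is an editor-added sketch (not part of the upstream
+// generated code) showing how CompletedPart values are assembled into the
+// CompletedMultipartUpload payload described above: part numbers are 1-based
+// and consecutive, and each ETag echoes the value returned for that part. The
+// etags argument is illustrative.
+func exampleCompletedUpload(etags []string) CompletedMultipartUpload {
+ parts := make([]CompletedPart, 0, len(etags))
+ for i := range etags {
+  n := int32(i + 1) // part numbers start at 1 and stay consecutive
+  parts = append(parts, CompletedPart{ETag: &etags[i], PartNumber: &n})
+ }
+ return CompletedMultipartUpload{Parts: parts}
+}
+
+// Container for all response elements.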
+type CopyObjectResult struct { + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. + ETag *string + + // Creation date of the object. + LastModified *time.Time + + noSmithyDocumentSerde +} + +// Container for all response elements. +type CopyPartResult struct { + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. 
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag of the object. + ETag *string + + // Date and time at which the object was uploaded. + LastModified *time.Time + + noSmithyDocumentSerde +} + +// Describes the cross-origin access configuration for objects in an Amazon S3 +// bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon S3 User Guide. +type CORSConfiguration struct { + + // A set of origins and methods (cross-origin access that you want to allow). You + // can add up to 100 rules to the configuration. + // + // This member is required. + CORSRules []CORSRule + + noSmithyDocumentSerde +} + +// Specifies a cross-origin access rule for an Amazon S3 bucket. +type CORSRule struct { + + // An HTTP method that you allow the origin to execute. Valid values are GET , PUT + // , HEAD , POST , and DELETE . + // + // This member is required. + AllowedMethods []string + + // One or more origins you want customers to be able to access the bucket from. + // + // This member is required. + AllowedOrigins []string + + // Headers that are specified in the Access-Control-Request-Headers header. These + // headers are allowed in a preflight OPTIONS request. In response to any preflight + // OPTIONS request, Amazon S3 returns any requested headers that are allowed. + AllowedHeaders []string + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []string + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string + + // The time in seconds that your browser is to cache the preflight response for + // the specified resource. + MaxAgeSeconds *int32 + + noSmithyDocumentSerde +} + +// The configuration information for the bucket. +type CreateBucketConfiguration struct { + + // Specifies the information about the bucket that will be created. This + // functionality is only supported by directory buckets. + Bucket *BucketInfo + + // Specifies the location where the bucket will be created. For directory buckets, + // the location type is Availability Zone. This functionality is only supported by + // directory buckets. + Location *LocationInfo + + // Specifies the Region where the bucket will be created. You might choose a + // Region to optimize latency, minimize costs, or address regulatory requirements. + // For example, if you reside in Europe, you will probably find it advantageous to + // create buckets in the Europe (Ireland) Region. 
For more information, see + // Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + // in the Amazon S3 User Guide. If you don't specify a Region, the bucket is + // created in the US East (N. Virginia) Region (us-east-1) by default. This + // functionality is not supported for directory buckets. + LocationConstraint BucketLocationConstraint + + noSmithyDocumentSerde +} + +// Describes how an uncompressed comma-separated values (CSV)-formatted input +// object is formatted. +type CSVInput struct { + + // Specifies that CSV field values may contain quoted record delimiters and such + // records should be allowed. Default value is FALSE. Setting this value to TRUE + // may lower performance. + AllowQuotedRecordDelimiter *bool + + // A single character used to indicate that a row should be ignored when the + // character is present at the start of that row. You can specify any character to + // indicate a comment line. The default character is # . Default: # + Comments *string + + // A single character used to separate individual fields in a record. You can + // specify an arbitrary delimiter. + FieldDelimiter *string + + // Describes the first line of input. Valid values are: + // - NONE : First line is not a header. + // - IGNORE : First line is a header, but you can't use the header values to + // indicate the column in an expression. You can use column position (such as _1, + // _2, …) to indicate the column ( SELECT s._1 FROM OBJECT s ). + // - Use : First line is a header, and you can use the header value to identify a + // column in an expression ( SELECT "name" FROM OBJECT ). + FileHeaderInfo FileHeaderInfo + + // A single character used for escaping when the field delimiter is part of the + // value. For example, if the value is a, b , Amazon S3 wraps this field value in + // quotation marks, as follows: " a , b " . Type: String Default: " Ancestors: CSV + QuoteCharacter *string + + // A single character used for escaping the quotation mark character inside an + // already escaped value. For example, the value """ a , b """ is parsed as " a , + // b " . + QuoteEscapeCharacter *string + + // A single character used to separate individual records in the input. Instead of + // the default value, you can specify an arbitrary delimiter. + RecordDelimiter *string + + noSmithyDocumentSerde +} + +// Describes how uncompressed comma-separated values (CSV)-formatted results are +// formatted. +type CSVOutput struct { + + // The value used to separate individual fields in a record. You can specify an + // arbitrary delimiter. + FieldDelimiter *string + + // A single character used for escaping when the field delimiter is part of the + // value. For example, if the value is a, b , Amazon S3 wraps this field value in + // quotation marks, as follows: " a , b " . + QuoteCharacter *string + + // The single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string + + // Indicates whether to use quotation marks around output fields. + // - ALWAYS : Always use quotation marks for output fields. + // - ASNEEDED : Use quotation marks for output fields when needed. + QuoteFields QuoteFields + + // A single character used to separate individual records in the output. Instead + // of the default value, you can specify an arbitrary delimiter. 
+ RecordDelimiter *string
+
+ noSmithyDocumentSerde
+}
+
+// The container element for specifying the default Object Lock retention settings
+// for new objects placed in the specified bucket.
+// - The DefaultRetention settings require both a mode and a period.
+// - The DefaultRetention period can be either Days or Years but you must select
+// one. You cannot specify Days and Years at the same time.
+type DefaultRetention struct {
+
+ // The number of days that you want to specify for the default retention period.
+ // Must be used with Mode .
+ Days *int32
+
+ // The default Object Lock retention mode you want to apply to new objects placed
+ // in the specified bucket. Must be used with either Days or Years .
+ Mode ObjectLockRetentionMode
+
+ // The number of years that you want to specify for the default retention period.
+ // Must be used with Mode .
+ Years *int32
+
+ noSmithyDocumentSerde
+}
+
+// Container for the objects to delete.
+type Delete struct {
+
+ // The object to delete. Directory buckets - For directory buckets, an object
+ // that's composed entirely of whitespace characters is not supported by the
+ // DeleteObjects API operation. The request will receive a 400 Bad Request error
+ // and none of the objects in the request will be deleted.
+ //
+ // This member is required.
+ Objects []ObjectIdentifier
+
+ // Element to enable quiet mode for the request. When you add this element, you
+ // must set its value to true .
+ Quiet *bool
+
+ noSmithyDocumentSerde
+}
+
+// Information about the deleted object.
+type DeletedObject struct {
+
+ // Indicates whether the specified object version that was permanently deleted was
+ // (true) or was not (false) a delete marker before deletion. In a simple DELETE,
+ // this header indicates whether (true) or not (false) the current version of the
+ // object is a delete marker. This functionality is not supported for directory
+ // buckets.
+ DeleteMarker *bool
+
+ // The version ID of the delete marker created as a result of the DELETE
+ // operation. If you delete a specific object version, the value returned by this
+ // header is the version ID of the object version deleted. This functionality is
+ // not supported for directory buckets.
+ DeleteMarkerVersionId *string
+
+ // The name of the deleted object.
+ Key *string
+
+ // The version ID of the deleted object. This functionality is not supported for
+ // directory buckets.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+// Information about the delete marker.
+type DeleteMarkerEntry struct {
+
+ // Specifies whether the object is (true) or is not (false) the latest version of
+ // an object.
+ IsLatest *bool
+
+ // The object key.
+ Key *string
+
+ // Date and time when the object was last modified.
+ LastModified *time.Time
+
+ // The account that created the delete marker.
+ Owner *Owner
+
+ // Version ID of an object.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
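+// exampleDeleteBatch is an editor-added sketch (not part of the upstream
+// generated code) showing how the Delete payload above is assembled for a
+// DeleteObjects call: each key becomes an ObjectIdentifier, and Quiet
+// suppresses the per-object success entries in the response.
+func exampleDeleteBatch(keys []string) Delete {
+ quiet := true
+ ids := make([]ObjectIdentifier, 0, len(keys))
+ for i := range keys {
+  ids = append(ids, ObjectIdentifier{Key: &keys[i]})
+ }
+ return Delete{Objects: ids, Quiet: &quiet}
+}
+
+// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+// in your replication configuration, you must also include a
+// DeleteMarkerReplication element. If your Filter includes a Tag element, the
+// DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does
+// not support replicating delete markers for tag-based rules. For an example
+// configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config)
+// .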
For more information about delete marker replication, see Delete marker
+// replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html)
+// . If you are using an earlier version of the replication configuration, Amazon
+// S3 handles replication of delete markers differently. For more information, see
+// Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations)
+// .
+type DeleteMarkerReplication struct {
+
+ // Indicates whether to replicate delete markers.
+ Status DeleteMarkerReplicationStatus
+
+ noSmithyDocumentSerde
+}
+
+// Specifies information about where to publish analysis or configuration results
+// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).
+type Destination struct {
+
+ // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store
+ // the results.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specify this only in a cross-account scenario (where source and destination
+ // bucket owners are not the same) in which you want to change replica ownership
+ // to the Amazon Web Services account that owns the destination bucket. If this is
+ // not specified in the replication configuration, the replicas are owned by the
+ // same Amazon Web Services account that owns the source object.
+ AccessControlTranslation *AccessControlTranslation
+
+ // Destination bucket owner account ID. In a cross-account scenario, if you direct
+ // Amazon S3 to change replica ownership to the Amazon Web Services account that
+ // owns the destination bucket by specifying the AccessControlTranslation
+ // property, this is the account ID of the destination bucket owner. For more
+ // information, see Replication Additional Configuration: Changing the Replica
+ // Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html)
+ // in the Amazon S3 User Guide.
+ Account *string
+
+ // A container that provides information about encryption. If
+ // SourceSelectionCriteria is specified, you must specify this element.
+ EncryptionConfiguration *EncryptionConfiguration
+
+ // A container specifying replication metrics-related settings enabling
+ // replication metrics and events.
+ Metrics *Metrics
+
+ // A container specifying S3 Replication Time Control (S3 RTC), including whether
+ // S3 RTC is enabled and the time when all objects and operations on objects must
+ // be replicated. Must be specified together with a Metrics block.
+ ReplicationTime *ReplicationTime
+
+ // The storage class to use when replicating objects, such as S3 Standard or
+ // reduced redundancy. By default, Amazon S3 uses the storage class of the source
+ // object to create the object replica. For valid values, see the StorageClass
+ // element of the PUT Bucket replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
+ // action in the Amazon S3 API Reference.
+ StorageClass StorageClass
+
+ noSmithyDocumentSerde
+}
+
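+// exampleReplicationDestination is an editor-added sketch (not part of the
+// upstream generated code) showing a Destination with S3 RTC enabled; as the
+// field docs above note, ReplicationTime must be paired with a Metrics block.
+// The bucket ARN and the 15-minute threshold are illustrative.
+func exampleReplicationDestination() Destination {
+ bucket := "arn:aws:s3:::example-replica-bucket"
+ minutes := int32(15)
+ return Destination{
+  Bucket: &bucket,
+  ReplicationTime: &ReplicationTime{
+   Status: ReplicationTimeStatusEnabled,
+   Time:   &ReplicationTimeValue{Minutes: &minutes},
+  },
+  Metrics: &Metrics{
+   Status:         MetricsStatusEnabled,
+   EventThreshold: &ReplicationTimeValue{Minutes: &minutes},
+  },
+ }
+}
+
+// Contains the type of server-side encryption used.
+type Encryption struct {
+
+ // The server-side encryption algorithm used when storing job results in Amazon S3
+ // (for example, AES256, aws:kms ).
+ //
+ // This member is required.
+ EncryptionType ServerSideEncryption
+
+ // If the encryption type is aws:kms , this optional value can be used to specify
+ // the encryption context for the restore results.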
+ KMSContext *string
+
+ // If the encryption type is aws:kms , this optional value specifies the ID of the
+ // symmetric encryption customer managed key to use for encryption of job results.
+ // Amazon S3 only supports symmetric encryption KMS keys. For more information, see
+ // Asymmetric keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+ // in the Amazon Web Services Key Management Service Developer Guide.
+ KMSKeyId *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies encryption-related information for an Amazon S3 bucket that is a
+// destination for replicated objects.
+type EncryptionConfiguration struct {
+
+ // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web
+ // Services KMS key stored in Amazon Web Services Key Management Service (KMS) for
+ // the destination bucket. Amazon S3 uses this key to encrypt replica objects.
+ // Amazon S3 only supports symmetric encryption KMS keys. For more information, see
+ // Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+ // in the Amazon Web Services Key Management Service Developer Guide.
+ ReplicaKmsKeyID *string
+
+ noSmithyDocumentSerde
+}
+
+// A message that indicates the request is complete and no more messages will be
+// sent. You should not assume that the request is complete until the client
+// receives an EndEvent .
+type EndEvent struct {
+ noSmithyDocumentSerde
+}
+
+// Container for all error elements.
+type Error struct {
+
+ // The error code is a string that uniquely identifies an error condition. It is
+ // meant to be read and understood by programs that detect and handle errors by
+ // type. The following is a list of Amazon S3 error codes. For more information,
+ // see Error responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html)
+ // .
+ // - Code: AccessDenied
+ // - Description: Access Denied
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: AccountProblem
+ // - Description: There is a problem with your Amazon Web Services account that
+ // prevents the action from completing successfully. Contact Amazon Web Services
+ // Support for further assistance.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: AllAccessDisabled
+ // - Description: All access to this Amazon S3 resource has been disabled.
+ // Contact Amazon Web Services Support for further assistance.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: AmbiguousGrantByEmailAddress
+ // - Description: The email address you provided is associated with more than
+ // one account.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: AuthorizationHeaderMalformed
+ // - Description: The authorization header you provided is invalid.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: N/A
+ // - Code: BadDigest
+ // - Description: The Content-MD5 you specified did not match what we received.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: BucketAlreadyExists
+ // - Description: The requested bucket name is not available. The bucket
+ // namespace is shared by all users of the system. Please select a different name
+ // and try again.
+ // - HTTP Status Code: 409 Conflict
+ // - SOAP Fault Code Prefix: Client
+ // - Code: BucketAlreadyOwnedByYou
+ // - Description: The bucket you tried to create already exists, and you own it.
+ // Amazon S3 returns this error in all Amazon Web Services Regions except in the
+ // North Virginia Region. For legacy compatibility, if you re-create an existing
+ // bucket that you already own in the North Virginia Region, Amazon S3 returns 200
+ // OK and resets the bucket access control lists (ACLs).
+ // - HTTP Status Code: 409 Conflict (in all Regions except the North Virginia
+ // Region)
+ // - SOAP Fault Code Prefix: Client
+ // - Code: BucketNotEmpty
+ // - Description: The bucket you tried to delete is not empty.
+ // - HTTP Status Code: 409 Conflict
+ // - SOAP Fault Code Prefix: Client
+ // - Code: CredentialsNotSupported
+ // - Description: This request does not support credentials.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: CrossLocationLoggingProhibited
+ // - Description: Cross-location logging not allowed. Buckets in one geographic
+ // location cannot log information to a bucket in another location.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: EntityTooSmall
+ // - Description: Your proposed upload is smaller than the minimum allowed
+ // object size.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: EntityTooLarge
+ // - Description: Your proposed upload exceeds the maximum allowed object size.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: ExpiredToken
+ // - Description: The provided token has expired.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: IllegalVersioningConfigurationException
+ // - Description: Indicates that the versioning configuration specified in the
+ // request is invalid.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: IncompleteBody
+ // - Description: You did not provide the number of bytes specified by the
+ // Content-Length HTTP header.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: IncorrectNumberOfFilesInPostRequest
+ // - Description: POST requires exactly one file upload per request.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InlineDataTooLarge
+ // - Description: Inline data exceeds the maximum allowed size.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InternalError
+ // - Description: We encountered an internal error. Please try again.
+ // - HTTP Status Code: 500 Internal Server Error
+ // - SOAP Fault Code Prefix: Server
+ // - Code: InvalidAccessKeyId
+ // - Description: The Amazon Web Services access key ID you provided does not
+ // exist in our records.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidAddressingHeader
+ // - Description: You must specify the Anonymous role.
+ // - HTTP Status Code: N/A
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidArgument
+ // - Description: Invalid Argument
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidBucketName
+ // - Description: The specified bucket is not valid.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidBucketState
+ // - Description: The request is not valid with the current state of the bucket.
+ // - HTTP Status Code: 409 Conflict
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidDigest
+ // - Description: The Content-MD5 you specified is not valid.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidEncryptionAlgorithmError
+ // - Description: The encryption request you specified is not valid. The valid
+ // value is AES256.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidLocationConstraint
+ // - Description: The specified location constraint is not valid. For more
+ // information about Regions, see How to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
+ // .
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidObjectState
+ // - Description: The action is not valid for the current state of the object.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidPart
+ // - Description: One or more of the specified parts could not be found. The
+ // part might not have been uploaded, or the specified entity tag might not have
+ // matched the part's entity tag.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidPartOrder
+ // - Description: The list of parts was not in ascending order. Parts list must
+ // be specified in order by part number.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidPayer
+ // - Description: All access to this object has been disabled. Please contact
+ // Amazon Web Services Support for further assistance.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidPolicyDocument
+ // - Description: The content of the form does not meet the conditions specified
+ // in the policy document.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidRange
+ // - Description: The requested range cannot be satisfied.
+ // - HTTP Status Code: 416 Requested Range Not Satisfiable
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidRequest
+ // - Description: Please use AWS4-HMAC-SHA256 .
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: N/A
+ // - Code: InvalidRequest
+ // - Description: SOAP requests must be made over an HTTPS connection.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Acceleration is not supported for buckets
+ // with non-DNS compliant names.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: N/A
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Acceleration is not supported for buckets
+ // with periods (.) in their names.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: N/A
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Accelerate endpoint only supports virtual
+ // style requests.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: N/A
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Accelerate is not configured on this
+ // bucket.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: N/A
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Accelerate is disabled on this bucket.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: N/A
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Acceleration is not supported on this
+ // bucket. Contact Amazon Web Services Support for more information.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: N/A
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Acceleration cannot be enabled on this
+ // bucket. Contact Amazon Web Services Support for more information.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: N/A
+ // - Code: InvalidSecurity
+ // - Description: The provided security credentials are not valid.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidSOAPRequest
+ // - Description: The SOAP request body is invalid.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidStorageClass
+ // - Description: The storage class you specified is not valid.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidTargetBucketForLogging
+ // - Description: The target bucket for logging does not exist, is not owned by
+ // you, or does not have the appropriate grants for the log-delivery group.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidToken
+ // - Description: The provided token is malformed or otherwise invalid.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidURI
+ // - Description: Couldn't parse the specified URI.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: KeyTooLongError
+ // - Description: Your key is too long.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MalformedACLError
+ // - Description: The XML you provided was not well-formed or did not validate
+ // against our published schema.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MalformedPOSTRequest
+ // - Description: The body of your POST request is not well-formed
+ // multipart/form-data.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MalformedXML
+ // - Description: This happens when the user sends malformed XML (XML that
+ // doesn't conform to the published XSD) for the configuration. The error message
+ // is, "The XML you provided was not well-formed or did not validate against our
+ // published schema."
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MaxMessageLengthExceeded
+ // - Description: Your request was too big.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MaxPostPreDataLengthExceededError
+ // - Description: Your POST request fields preceding the upload file were too
+ // large.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MetadataTooLarge
+ // - Description: Your metadata headers exceed the maximum allowed metadata
+ // size.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MethodNotAllowed
+ // - Description: The specified method is not allowed against this resource.
+ // - HTTP Status Code: 405 Method Not Allowed + // - SOAP Fault Code Prefix: Client + // - Code: MissingAttachment + // - Description: A SOAP attachment was expected, but none were found. + // - HTTP Status Code: N/A + // - SOAP Fault Code Prefix: Client + // - Code: MissingContentLength + // - Description: You must provide the Content-Length HTTP header. + // - HTTP Status Code: 411 Length Required + // - SOAP Fault Code Prefix: Client + // - Code: MissingRequestBodyError + // - Description: This happens when the user sends an empty XML document as a + // request. The error message is, "Request body is empty." + // - HTTP Status Code: 400 Bad Request + // - SOAP Fault Code Prefix: Client + // - Code: MissingSecurityElement + // - Description: The SOAP 1.1 request is missing a security element. + // - HTTP Status Code: 400 Bad Request + // - SOAP Fault Code Prefix: Client + // - Code: MissingSecurityHeader + // - Description: Your request is missing a required header. + // - HTTP Status Code: 400 Bad Request + // - SOAP Fault Code Prefix: Client + // - Code: NoLoggingStatusForKey + // - Description: There is no such thing as a logging status subresource for a + // key. + // - HTTP Status Code: 400 Bad Request + // - SOAP Fault Code Prefix: Client + // - Code: NoSuchBucket + // - Description: The specified bucket does not exist. + // - HTTP Status Code: 404 Not Found + // - SOAP Fault Code Prefix: Client + // - Code: NoSuchBucketPolicy + // - Description: The specified bucket does not have a bucket policy. + // - HTTP Status Code: 404 Not Found + // - SOAP Fault Code Prefix: Client + // - Code: NoSuchKey + // - Description: The specified key does not exist. + // - HTTP Status Code: 404 Not Found + // - SOAP Fault Code Prefix: Client + // - Code: NoSuchLifecycleConfiguration + // - Description: The lifecycle configuration does not exist. + // - HTTP Status Code: 404 Not Found + // - SOAP Fault Code Prefix: Client + // - Code: NoSuchUpload + // - Description: The specified multipart upload does not exist. The upload ID + // might be invalid, or the multipart upload might have been aborted or completed. + // - HTTP Status Code: 404 Not Found + // - SOAP Fault Code Prefix: Client + // - Code: NoSuchVersion + // - Description: Indicates that the version ID specified in the request does + // not match an existing version. + // - HTTP Status Code: 404 Not Found + // - SOAP Fault Code Prefix: Client + // - Code: NotImplemented + // - Description: A header you provided implies functionality that is not + // implemented. + // - HTTP Status Code: 501 Not Implemented + // - SOAP Fault Code Prefix: Server + // - Code: NotSignedUp + // - Description: Your account is not signed up for the Amazon S3 service. You + // must sign up before you can use Amazon S3. You can sign up at the following URL: + // Amazon S3 (http://aws.amazon.com/s3) + // - HTTP Status Code: 403 Forbidden + // - SOAP Fault Code Prefix: Client + // - Code: OperationAborted + // - Description: A conflicting conditional action is currently in progress + // against this resource. Try again. + // - HTTP Status Code: 409 Conflict + // - SOAP Fault Code Prefix: Client + // - Code: PermanentRedirect + // - Description: The bucket you are attempting to access must be addressed + // using the specified endpoint. Send all future requests to this endpoint. 
+ // - HTTP Status Code: 301 Moved Permanently + // - SOAP Fault Code Prefix: Client + // - Code: PreconditionFailed + // - Description: At least one of the preconditions you specified did not hold. + // - HTTP Status Code: 412 Precondition Failed + // - SOAP Fault Code Prefix: Client + // - Code: Redirect + // - Description: Temporary redirect. + // - HTTP Status Code: 307 Moved Temporarily + // - SOAP Fault Code Prefix: Client + // - Code: RestoreAlreadyInProgress + // - Description: Object restore is already in progress. + // - HTTP Status Code: 409 Conflict + // - SOAP Fault Code Prefix: Client + // - Code: RequestIsNotMultiPartContent + // - Description: Bucket POST must be of the enclosure-type multipart/form-data. + // - HTTP Status Code: 400 Bad Request + // - SOAP Fault Code Prefix: Client + // - Code: RequestTimeout + // - Description: Your socket connection to the server was not read from or + // written to within the timeout period. + // - HTTP Status Code: 400 Bad Request + // - SOAP Fault Code Prefix: Client + // - Code: RequestTimeTooSkewed + // - Description: The difference between the request time and the server's time + // is too large. + // - HTTP Status Code: 403 Forbidden + // - SOAP Fault Code Prefix: Client + // - Code: RequestTorrentOfBucketError + // - Description: Requesting the torrent file of a bucket is not permitted. + // - HTTP Status Code: 400 Bad Request + // - SOAP Fault Code Prefix: Client + // - Code: SignatureDoesNotMatch + // - Description: The request signature we calculated does not match the + // signature you provided. Check your Amazon Web Services secret access key and + // signing method. For more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) + // for details. + // - HTTP Status Code: 403 Forbidden + // - SOAP Fault Code Prefix: Client + // - Code: ServiceUnavailable + // - Description: Service is unable to handle request. + // - HTTP Status Code: 503 Service Unavailable + // - SOAP Fault Code Prefix: Server + // - Code: SlowDown + // - Description: Reduce your request rate. + // - HTTP Status Code: 503 Slow Down + // - SOAP Fault Code Prefix: Server + // - Code: TemporaryRedirect + // - Description: You are being redirected to the bucket while DNS updates. + // - HTTP Status Code: 307 Moved Temporarily + // - SOAP Fault Code Prefix: Client + // - Code: TokenRefreshRequired + // - Description: The provided token must be refreshed. + // - HTTP Status Code: 400 Bad Request + // - SOAP Fault Code Prefix: Client + // - Code: TooManyBuckets + // - Description: You have attempted to create more buckets than allowed. + // - HTTP Status Code: 400 Bad Request + // - SOAP Fault Code Prefix: Client + // - Code: UnexpectedContent + // - Description: This request does not support content. + // - HTTP Status Code: 400 Bad Request + // - SOAP Fault Code Prefix: Client + // - Code: UnresolvableGrantByEmailAddress + // - Description: The email address you provided does not match any account on + // record. + // - HTTP Status Code: 400 Bad Request + // - SOAP Fault Code Prefix: Client + // - Code: UserKeyMustBeSpecified + // - Description: The bucket POST must contain the specified field name. If it + // is specified, check the order of the fields. + // - HTTP Status Code: 400 Bad Request + // - SOAP Fault Code Prefix: Client + Code *string + + // The error key. 
+ Key *string + + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they don't + // know how or don't care to handle. Sophisticated programs with more exhaustive + // error handling and proper internationalization are more likely to ignore the + // error message. + Message *string + + // The version ID of the error. This functionality is not supported for directory + // buckets. + VersionId *string + + noSmithyDocumentSerde +} + +// The error information. +type ErrorDocument struct { + + // The object key name to use when a 4XX class error occurs. Replacement must be + // made for object keys containing special characters (such as carriage returns) + // when using XML requests. For more information, see XML related object key + // constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) + // . + // + // This member is required. + Key *string + + noSmithyDocumentSerde +} + +// A container for specifying the configuration for Amazon EventBridge. +type EventBridgeConfiguration struct { + noSmithyDocumentSerde +} + +// Optional configuration to replicate existing source bucket objects. For more +// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) +// in the Amazon S3 User Guide. +type ExistingObjectReplication struct { + + // Specifies whether Amazon S3 replicates existing source bucket objects. + // + // This member is required. + Status ExistingObjectReplicationStatus + + noSmithyDocumentSerde +} + +// Specifies the Amazon S3 object key name to filter on. An object key name is the +// name assigned to an object in your Amazon S3 bucket. You specify whether to +// filter on the suffix or prefix of the object key name. A prefix is a specific +// string of characters at the beginning of an object key name, which you can use +// to organize objects. For example, you can start the key names of related objects +// with a prefix, such as 2023- or engineering/ . Then, you can use FilterRule to +// find objects in a bucket with key names that have the same prefix. A suffix is +// similar to a prefix, but it is at the end of the object key name instead of at +// the beginning. +type FilterRule struct { + + // The object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. The maximum length is 1,024 characters. Overlapping + // prefixes and suffixes are not supported. For more information, see Configuring + // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. + Name FilterRuleName + + // The value that the filter searches for in object key names. + Value *string + + noSmithyDocumentSerde +} + +// A collection of parts associated with a multipart upload. +type GetObjectAttributesParts struct { + + // Indicates whether the returned list of parts is truncated. A value of true + // indicates that the list was truncated. A list can be truncated if the number of + // parts exceeds the limit returned in the MaxParts element. + IsTruncated *bool + + // The maximum number of parts allowed in the response. 
+ MaxParts *int32
+
+ // When a list is truncated, this element specifies the last part in the list, as
+ // well as the value to use for the PartNumberMarker request parameter in a
+ // subsequent request.
+ NextPartNumberMarker *string
+
+ // The marker for the current part.
+ PartNumberMarker *string
+
+ // A container for elements related to a particular part. A response can contain
+ // zero or more Parts elements.
+ // - General purpose buckets - For GetObjectAttributes , if an additional checksum
+ // (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , x-amz-checksum-sha1
+ // , or x-amz-checksum-sha256 ) isn't applied to the object specified in the
+ // request, the response doesn't return Part .
+ // - Directory buckets - For GetObjectAttributes , no matter whether an additional
+ // checksum is applied to the object specified in the request, the response returns
+ // Part .
+ Parts []ObjectPart
+
+ // The total number of parts.
+ TotalPartsCount *int32
+
+ noSmithyDocumentSerde
+}
+
+// Container for S3 Glacier job parameters.
+type GlacierJobParameters struct {
+
+ // Retrieval tier at which the restore will be processed.
+ //
+ // This member is required.
+ Tier Tier
+
+ noSmithyDocumentSerde
+}
+
+// Container for grant information.
+type Grant struct {
+
+ // The person being granted permissions.
+ Grantee *Grantee
+
+ // Specifies the permission given to the grantee.
+ Permission Permission
+
+ noSmithyDocumentSerde
+}
+
+// Container for the person being granted permissions.
+type Grantee struct {
+
+ // Type of grantee.
+ //
+ // This member is required.
+ Type Type
+
+ // Screen name of the grantee.
+ DisplayName *string
+
+ // Email address of the grantee. Using email addresses to specify a grantee is
+ // only supported in the following Amazon Web Services Regions:
+ // - US East (N. Virginia)
+ // - US West (N. California)
+ // - US West (Oregon)
+ // - Asia Pacific (Singapore)
+ // - Asia Pacific (Sydney)
+ // - Asia Pacific (Tokyo)
+ // - Europe (Ireland)
+ // - South America (São Paulo)
+ // For a list of all the Amazon S3 supported Regions and endpoints, see Regions
+ // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the Amazon Web Services General Reference.
+ EmailAddress *string
+
+ // The canonical user ID of the grantee.
+ ID *string
+
+ // URI of the grantee group.
+ URI *string
+
+ noSmithyDocumentSerde
+}
+
+// Container for the Suffix element.
+type IndexDocument struct {
+
+ // A suffix that is appended to a request that is for a directory on the website
+ // endpoint (for example, if the suffix is index.html and you make a request to
+ // samplebucket/images/, the data that is returned will be for the object with the
+ // key name images/index.html). The suffix must not be empty and must not include a
+ // slash character. Replacement must be made for object keys containing special
+ // characters (such as carriage returns) when using XML requests. For more
+ // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
+ // .
+ //
+ // This member is required.
+ Suffix *string
+
+ noSmithyDocumentSerde
+}
+
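+// exampleCanonicalUserGrant is an editor-added sketch (not part of the
+// upstream generated code) showing a read-only Grant for the Grantee type
+// above; the canonical user ID is supplied by the caller.
+func exampleCanonicalUserGrant(canonicalID string) Grant {
+ return Grant{
+  Grantee: &Grantee{
+   Type: TypeCanonicalUser,
+   ID:   &canonicalID,
+  },
+  Permission: PermissionRead,
+ }
+}
+
+// Container element that identifies who initiated the multipart upload.
+type Initiator struct {
+
+ // Name of the Principal. This functionality is not supported for directory
+ // buckets.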
+ DisplayName *string + + // If the principal is an Amazon Web Services account, it provides the Canonical + // User ID. If the principal is an IAM User, it provides a user ARN value. + // Directory buckets - If the principal is an Amazon Web Services account, it + // provides the Amazon Web Services account ID. If the principal is an IAM User, it + // provides a user ARN value. + ID *string + + noSmithyDocumentSerde +} + +// Describes the serialization format of the object. +type InputSerialization struct { + + // Describes the serialization of a CSV-encoded object. + CSV *CSVInput + + // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default + // Value: NONE. + CompressionType CompressionType + + // Specifies JSON as object's input serialization format. + JSON *JSONInput + + // Specifies Parquet as object's input serialization format. + Parquet *ParquetInput + + noSmithyDocumentSerde +} + +// A container for specifying S3 Intelligent-Tiering filters. The filters +// determine the subset of objects to which the rule applies. +type IntelligentTieringAndOperator struct { + + // An object key name prefix that identifies the subset of objects to which the + // configuration applies. + Prefix *string + + // All of these tags must exist in the object's tag set in order for the + // configuration to apply. + Tags []Tag + + noSmithyDocumentSerde +} + +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. For +// information about the S3 Intelligent-Tiering storage class, see Storage class +// for automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) +// . +type IntelligentTieringConfiguration struct { + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // This member is required. + Id *string + + // Specifies the status of the configuration. + // + // This member is required. + Status IntelligentTieringStatus + + // Specifies the S3 Intelligent-Tiering storage class tier of the configuration. + // + // This member is required. + Tierings []Tiering + + // Specifies a bucket filter. The configuration only includes objects that meet + // the filter's criteria. + Filter *IntelligentTieringFilter + + noSmithyDocumentSerde +} + +// The Filter is used to identify objects that the S3 Intelligent-Tiering +// configuration applies to. +type IntelligentTieringFilter struct { + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *IntelligentTieringAndOperator + + // An object key name prefix that identifies the subset of objects to which the + // rule applies. Replacement must be made for object keys containing special + // characters (such as carriage returns) when using XML requests. For more + // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) + // . + Prefix *string + + // A container of a key value name pair. + Tag *Tag + + noSmithyDocumentSerde +} + +// Specifies the inventory configuration for an Amazon S3 bucket. For more +// information, see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon S3 API Reference. 
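+//
+// Editor's note (illustrative, not upstream text): a minimal weekly CSV
+// inventory of current object versions could be assembled as
+//
+//	enabled := true
+//	cfg := InventoryConfiguration{
+//		Id:                     &id,
+//		IsEnabled:              &enabled,
+//		IncludedObjectVersions: InventoryIncludedObjectVersionsCurrent,
+//		Schedule:               &InventorySchedule{Frequency: InventoryFrequencyWeekly},
+//		Destination: &InventoryDestination{S3BucketDestination: &InventoryS3BucketDestination{
+//			Bucket: &bucketARN, Format: InventoryFormatCsv,
+//		}},
+//	}
+//
+// where id and bucketARN are strings supplied by the caller.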
+type InventoryConfiguration struct { + + // Contains information about where to publish the inventory results. + // + // This member is required. + Destination *InventoryDestination + + // The ID used to identify the inventory configuration. + // + // This member is required. + Id *string + + // Object versions to include in the inventory list. If set to All , the list + // includes all the object versions, which adds the version-related fields + // VersionId , IsLatest , and DeleteMarker to the list. If set to Current , the + // list does not contain these version-related fields. + // + // This member is required. + IncludedObjectVersions InventoryIncludedObjectVersions + + // Specifies whether the inventory is enabled or disabled. If set to True , an + // inventory list is generated. If set to False , no inventory list is generated. + // + // This member is required. + IsEnabled *bool + + // Specifies the schedule for generating inventory results. + // + // This member is required. + Schedule *InventorySchedule + + // Specifies an inventory filter. The inventory only includes objects that meet + // the filter's criteria. + Filter *InventoryFilter + + // Contains the optional fields that are included in the inventory results. + OptionalFields []InventoryOptionalField + + noSmithyDocumentSerde +} + +// Specifies the inventory configuration for an Amazon S3 bucket. +type InventoryDestination struct { + + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + // + // This member is required. + S3BucketDestination *InventoryS3BucketDestination + + noSmithyDocumentSerde +} + +// Contains the type of server-side encryption used to encrypt the inventory +// results. +type InventoryEncryption struct { + + // Specifies the use of SSE-KMS to encrypt delivered inventory reports. + SSEKMS *SSEKMS + + // Specifies the use of SSE-S3 to encrypt delivered inventory reports. + SSES3 *SSES3 + + noSmithyDocumentSerde +} + +// Specifies an inventory filter. The inventory only includes objects that meet +// the filter's criteria. +type InventoryFilter struct { + + // The prefix that an object must have to be included in the inventory results. + // + // This member is required. + Prefix *string + + noSmithyDocumentSerde +} + +// Contains the bucket name, file format, bucket owner (optional), and prefix +// (optional) where inventory results are published. +type InventoryS3BucketDestination struct { + + // The Amazon Resource Name (ARN) of the bucket where inventory results will be + // published. + // + // This member is required. + Bucket *string + + // Specifies the output format of the inventory results. + // + // This member is required. + Format InventoryFormat + + // The account ID that owns the destination S3 bucket. If no account ID is + // provided, the owner is not validated before exporting data. Although this value + // is optional, we strongly recommend that you set it to help prevent problems if + // the destination bucket ownership changes. + AccountId *string + + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption + + // The prefix that is prepended to all inventory results. + Prefix *string + + noSmithyDocumentSerde +} + +// Specifies the schedule for generating inventory results. +type InventorySchedule struct { + + // Specifies how frequently inventory results are produced. + // + // This member is required. 
+	Frequency InventoryFrequency
+
+	noSmithyDocumentSerde
+}
+
+// Specifies JSON as object's input serialization format.
+type JSONInput struct {
+
+	// The type of JSON. Valid values: Document, Lines.
+	Type JSONType
+
+	noSmithyDocumentSerde
+}
+
+// Specifies JSON as request's output serialization format.
+type JSONOutput struct {
+
+	// The value used to separate individual records in the output. If no value is
+	// specified, Amazon S3 uses a newline character ('\n').
+	RecordDelimiter *string
+
+	noSmithyDocumentSerde
+}
+
+// A container for specifying the configuration for Lambda notifications.
+type LambdaFunctionConfiguration struct {
+
+	// The Amazon S3 bucket event for which to invoke the Lambda function. For more
+	// information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+	// in the Amazon S3 User Guide.
+	//
+	// This member is required.
+	Events []Event
+
+	// The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes
+	// when the specified event type occurs.
+	//
+	// This member is required.
+	LambdaFunctionArn *string
+
+	// Specifies object key name filtering rules. For information about key name
+	// filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html)
+	// in the Amazon S3 User Guide.
+	Filter *NotificationConfigurationFilter
+
+	// An optional unique identifier for configurations in a notification
+	// configuration. If you don't provide one, Amazon S3 will assign an ID.
+	Id *string
+
+	noSmithyDocumentSerde
+}
+
+// Container for the expiration for the lifecycle of the object. For more
+// information, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide.
+type LifecycleExpiration struct {
+
+	// Indicates at what date the object is to be moved or deleted. The date value
+	// must conform to the ISO 8601 format. The time is always midnight UTC.
+	Date *time.Time
+
+	// Indicates the lifetime, in days, of the objects that are subject to the rule.
+	// The value must be a non-zero positive integer.
+	Days *int32
+
+	// Indicates whether Amazon S3 will remove a delete marker with no noncurrent
+	// versions. If set to true, the delete marker will be expired; if set to false,
+	// the policy takes no action. This cannot be specified with Days or Date in a
+	// Lifecycle Expiration Policy.
+	ExpiredObjectDeleteMarker *bool
+
+	noSmithyDocumentSerde
+}
+
+// A lifecycle rule for individual objects in an Amazon S3 bucket. For more
+// information, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide.
+type LifecycleRule struct {
+
+	// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is
+	// not currently being applied.
+	//
+	// This member is required.
+	Status ExpirationStatus
+
+	// Specifies the days since the initiation of an incomplete multipart upload that
+	// Amazon S3 will wait before permanently removing all parts of the upload. For
+	// more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+	// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+	// in the Amazon S3 User Guide.
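+	// For example (illustrative sketch; "tmp/" is a placeholder prefix and the aws
+	// helper package is assumed), a rule that aborts incomplete multipart uploads
+	// after seven days:
+	//
+	//	rule := types.LifecycleRule{
+	//		Status: types.ExpirationStatusEnabled,
+	//		Filter: &types.LifecycleRuleFilterMemberPrefix{Value: "tmp/"},
+	//		AbortIncompleteMultipartUpload: &types.AbortIncompleteMultipartUpload{
+	//			DaysAfterInitiation: aws.Int32(7),
+	//		},
+	//	}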
+	AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload
+
+	// Specifies the expiration for the lifecycle of the object in the form of a
+	// date, days, and whether the object has a delete marker.
+	Expiration *LifecycleExpiration
+
+	// The Filter is used to identify objects that a Lifecycle Rule applies to. A
+	// Filter must have exactly one of Prefix , Tag , or And specified. Filter is
+	// required if the LifecycleRule does not contain a Prefix element.
+	Filter LifecycleRuleFilter
+
+	// Unique identifier for the rule. The value cannot be longer than 255 characters.
+	ID *string
+
+	// Specifies when noncurrent object versions expire. Upon expiration, Amazon S3
+	// permanently deletes the noncurrent object versions. You set this lifecycle
+	// configuration action on a bucket that has versioning enabled (or suspended) to
+	// request that Amazon S3 delete noncurrent object versions at a specific period in
+	// the object's lifetime.
+	NoncurrentVersionExpiration *NoncurrentVersionExpiration
+
+	// Specifies the transition rule for the lifecycle rule that describes when
+	// noncurrent objects transition to a specific storage class. If your bucket is
+	// versioning-enabled (or versioning is suspended), you can set this action to
+	// request that Amazon S3 transition noncurrent object versions to a specific
+	// storage class at a set period in the object's lifetime.
+	NoncurrentVersionTransitions []NoncurrentVersionTransition
+
+	// Prefix identifying one or more objects to which the rule applies. This is no
+	// longer used; use Filter instead. Replacement must be made for object keys
+	// containing special characters (such as carriage returns) when using XML
+	// requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
+	// .
+	//
+	// Deprecated: This member has been deprecated.
+	Prefix *string
+
+	// Specifies when an Amazon S3 object transitions to a specified storage class.
+	Transitions []Transition
+
+	noSmithyDocumentSerde
+}
+
+// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more
+// predicates. The Lifecycle Rule will apply to any object matching all of the
+// predicates configured inside the And operator.
+type LifecycleRuleAndOperator struct {
+
+	// Minimum object size to which the rule applies.
+	ObjectSizeGreaterThan *int64
+
+	// Maximum object size to which the rule applies.
+	ObjectSizeLessThan *int64
+
+	// Prefix identifying one or more objects to which the rule applies.
+	Prefix *string
+
+	// All of these tags must exist in the object's tag set in order for the rule to
+	// apply.
+	Tags []Tag
+
+	noSmithyDocumentSerde
+}
+
+// The Filter is used to identify objects that a Lifecycle Rule applies to. A
+// Filter can have exactly one of Prefix , Tag , ObjectSizeGreaterThan ,
+// ObjectSizeLessThan , or And specified. If the Filter element is left empty, the
+// Lifecycle Rule applies to all objects in the bucket.
+//
+// The following types satisfy this interface:
+//
+// LifecycleRuleFilterMemberAnd
+// LifecycleRuleFilterMemberObjectSizeGreaterThan
+// LifecycleRuleFilterMemberObjectSizeLessThan
+// LifecycleRuleFilterMemberPrefix
+// LifecycleRuleFilterMemberTag
+type LifecycleRuleFilter interface {
+	isLifecycleRuleFilter()
+}
+
+// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more
+// predicates.
The Lifecycle Rule will apply to any object matching all of the +// predicates configured inside the And operator. +type LifecycleRuleFilterMemberAnd struct { + Value LifecycleRuleAndOperator + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberAnd) isLifecycleRuleFilter() {} + +// Minimum object size to which the rule applies. +type LifecycleRuleFilterMemberObjectSizeGreaterThan struct { + Value int64 + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberObjectSizeGreaterThan) isLifecycleRuleFilter() {} + +// Maximum object size to which the rule applies. +type LifecycleRuleFilterMemberObjectSizeLessThan struct { + Value int64 + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberObjectSizeLessThan) isLifecycleRuleFilter() {} + +// Prefix identifying one or more objects to which the rule applies. Replacement +// must be made for object keys containing special characters (such as carriage +// returns) when using XML requests. For more information, see XML related object +// key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) +// . +type LifecycleRuleFilterMemberPrefix struct { + Value string + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberPrefix) isLifecycleRuleFilter() {} + +// This tag must exist in the object's tag set in order for the rule to apply. +type LifecycleRuleFilterMemberTag struct { + Value Tag + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberTag) isLifecycleRuleFilter() {} + +// Specifies the location where the bucket will be created. For directory buckets, +// the location type is Availability Zone. For more information about directory +// buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. This functionality is only supported by directory +// buckets. +type LocationInfo struct { + + // The name of the location where the bucket will be created. For directory + // buckets, the name of the location is the AZ ID of the Availability Zone where + // the bucket will be created. An example AZ ID value is usw2-az1 . + Name *string + + // The type of location where the bucket will be created. + Type LocationType + + noSmithyDocumentSerde +} + +// Describes where logs are stored and the prefix that Amazon S3 assigns to all +// log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +// in the Amazon S3 API Reference. +type LoggingEnabled struct { + + // Specifies the bucket where you want Amazon S3 to store server access logs. You + // can have your logs delivered to any bucket that you own, including the same + // bucket that is being logged. You can also configure multiple buckets to deliver + // their logs to the same target bucket. In this case, you should choose a + // different TargetPrefix for each source bucket so that the delivered log files + // can be distinguished by key. + // + // This member is required. + TargetBucket *string + + // A prefix for all log object keys. If you store log files from multiple Amazon + // S3 buckets in a single bucket, you can use a prefix to distinguish which log + // files came from which bucket. + // + // This member is required. + TargetPrefix *string + + // Container for granting information. Buckets that use the bucket owner enforced + // setting for Object Ownership don't support target grants. 
For more information, + // see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) + // in the Amazon S3 User Guide. + TargetGrants []TargetGrant + + // Amazon S3 key format for log objects. + TargetObjectKeyFormat *TargetObjectKeyFormat + + noSmithyDocumentSerde +} + +// A metadata key-value pair to store with an object. +type MetadataEntry struct { + + // Name of the object. + Name *string + + // Value of the object. + Value *string + + noSmithyDocumentSerde +} + +// A container specifying replication metrics-related settings enabling +// replication metrics and events. +type Metrics struct { + + // Specifies whether the replication metrics are enabled. + // + // This member is required. + Status MetricsStatus + + // A container specifying the time threshold for emitting the + // s3:Replication:OperationMissedThreshold event. + EventThreshold *ReplicationTimeValue + + noSmithyDocumentSerde +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates, and an object +// must match all of the predicates in order for the filter to apply. +type MetricsAndOperator struct { + + // The access point ARN used when evaluating an AND predicate. + AccessPointArn *string + + // The prefix used when evaluating an AND predicate. + Prefix *string + + // The list of tags used when evaluating an AND predicate. + Tags []Tag + + noSmithyDocumentSerde +} + +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an +// existing metrics configuration, note that this is a full replacement of the +// existing metrics configuration. If you don't include the elements you want to +// keep, they are erased. For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) +// . +type MetricsConfiguration struct { + + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. + // + // This member is required. + Id *string + + // Specifies a metrics configuration filter. The metrics configuration will only + // include objects that meet the filter's criteria. A filter must be a prefix, an + // object tag, an access point ARN, or a conjunction (MetricsAndOperator). + Filter MetricsFilter + + noSmithyDocumentSerde +} + +// Specifies a metrics configuration filter. The metrics configuration only +// includes objects that meet the filter's criteria. A filter must be a prefix, an +// object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more +// information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// . +// +// The following types satisfy this interface: +// +// MetricsFilterMemberAccessPointArn +// MetricsFilterMemberAnd +// MetricsFilterMemberPrefix +// MetricsFilterMemberTag +type MetricsFilter interface { + isMetricsFilter() +} + +// The access point ARN used when evaluating a metrics filter. 
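+// Callers choose one concrete member type when building a filter. For example
+// (illustrative sketch; the access point ARN and the configuration ID are
+// placeholders, and the aws helper package is assumed):
+//
+//	cfg := types.MetricsConfiguration{
+//		Id: aws.String("access-point-requests"),
+//		Filter: &types.MetricsFilterMemberAccessPointArn{
+//			Value: "arn:aws:s3:us-east-1:111122223333:accesspoint/example",
+//		},
+//	}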
+type MetricsFilterMemberAccessPointArn struct { + Value string + + noSmithyDocumentSerde +} + +func (*MetricsFilterMemberAccessPointArn) isMetricsFilter() {} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates, and an object +// must match all of the predicates in order for the filter to apply. +type MetricsFilterMemberAnd struct { + Value MetricsAndOperator + + noSmithyDocumentSerde +} + +func (*MetricsFilterMemberAnd) isMetricsFilter() {} + +// The prefix used when evaluating a metrics filter. +type MetricsFilterMemberPrefix struct { + Value string + + noSmithyDocumentSerde +} + +func (*MetricsFilterMemberPrefix) isMetricsFilter() {} + +// The tag used when evaluating a metrics filter. +type MetricsFilterMemberTag struct { + Value Tag + + noSmithyDocumentSerde +} + +func (*MetricsFilterMemberTag) isMetricsFilter() {} + +// Container for the MultipartUpload for the Amazon S3 object. +type MultipartUpload struct { + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm ChecksumAlgorithm + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time + + // Identifies who initiated the multipart upload. + Initiator *Initiator + + // Key of the object for which the multipart upload was initiated. + Key *string + + // Specifies the owner of the object that is part of the multipart upload. + // Directory buckets - The bucket owner is returned as the object owner for all the + // objects. + Owner *Owner + + // The class of storage used to store the object. Directory buckets - Only the S3 + // Express One Zone storage class is supported by directory buckets to store + // objects. + StorageClass StorageClass + + // Upload ID that identifies the multipart upload. + UploadId *string + + noSmithyDocumentSerde +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 +// permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) to +// request that Amazon S3 delete noncurrent object versions at a specific period in +// the object's lifetime. +type NoncurrentVersionExpiration struct { + + // Specifies how many newer noncurrent versions must exist before Amazon S3 can + // perform the associated action on a given version. If there are this many more + // recent noncurrent versions, Amazon S3 will take the associated action. For more + // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. + NewerNoncurrentVersions *int32 + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. The value must be a non-zero positive integer. + // For information about the noncurrent days calculations, see How Amazon S3 + // Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon S3 User Guide. + NoncurrentDays *int32 + + noSmithyDocumentSerde +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA , ONEZONE_IA , INTELLIGENT_TIERING , GLACIER_IR , +// GLACIER , or DEEP_ARCHIVE storage class. 
If your bucket is versioning-enabled +// (or versioning is suspended), you can set this action to request that Amazon S3 +// transition noncurrent object versions to the STANDARD_IA , ONEZONE_IA , +// INTELLIGENT_TIERING , GLACIER_IR , GLACIER , or DEEP_ARCHIVE storage class at a +// specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + + // Specifies how many newer noncurrent versions must exist before Amazon S3 can + // perform the associated action on a given version. If there are this many more + // recent noncurrent versions, Amazon S3 will take the associated action. For more + // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. + NewerNoncurrentVersions *int32 + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates How Long an Object Has Been + // Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon S3 User Guide. + NoncurrentDays *int32 + + // The class of storage used to store the object. + StorageClass TransitionStorageClass + + noSmithyDocumentSerde +} + +// A container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off for the bucket. +type NotificationConfiguration struct { + + // Enables delivery of events to Amazon EventBridge. + EventBridgeConfiguration *EventBridgeConfiguration + + // Describes the Lambda functions to invoke and the events for which to invoke + // them. + LambdaFunctionConfigurations []LambdaFunctionConfiguration + + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. + QueueConfigurations []QueueConfiguration + + // The topic to which notifications are sent and the events for which + // notifications are generated. + TopicConfigurations []TopicConfiguration + + noSmithyDocumentSerde +} + +// Specifies object key name filtering rules. For information about key name +// filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) +// in the Amazon S3 User Guide. +type NotificationConfigurationFilter struct { + + // A container for object key name prefix and suffix filtering rules. + Key *S3KeyFilter + + noSmithyDocumentSerde +} + +// An object consists of data and its descriptive metadata. +type Object struct { + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm []ChecksumAlgorithm + + // The entity tag is a hash of the object. The ETag reflects changes only to the + // contents of an object, not its metadata. The ETag may or may not be an MD5 + // digest of the object data. Whether or not it is depends on how the object was + // created and how it is encrypted as described below: + // - Objects created by the PUT Object, POST Object, or Copy operation, or + // through the Amazon Web Services Management Console, and are encrypted by SSE-S3 + // or plaintext, have ETags that are an MD5 digest of their object data. 
+	// - Objects created by the PUT Object, POST Object, or Copy operation, or
+	// through the Amazon Web Services Management Console, and are encrypted by SSE-C
+	// or SSE-KMS, have ETags that are not an MD5 digest of their object data.
+	// - If an object is created by either the Multipart Upload or Part Copy
+	// operation, the ETag is not an MD5 digest, regardless of the method of
+	// encryption. If an object is larger than 16 MB, the Amazon Web Services
+	// Management Console will upload or copy that object as a Multipart Upload, and
+	// therefore the ETag will not be an MD5 digest.
+	// Directory buckets - MD5 is not supported by directory buckets.
+	ETag *string
+
+	// The name that you assign to an object. You use the object key to retrieve the
+	// object.
+	Key *string
+
+	// Creation date of the object.
+	LastModified *time.Time
+
+	// The owner of the object. Directory buckets - The bucket owner is returned as
+	// the object owner.
+	Owner *Owner
+
+	// Specifies the restoration status of an object. Objects in certain storage
+	// classes must be restored before they can be retrieved. For more information
+	// about these storage classes and how to work with archived objects, see Working
+	// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html)
+	// in the Amazon S3 User Guide. This functionality is not supported for directory
+	// buckets. Only the S3 Express One Zone storage class is supported by directory
+	// buckets to store objects.
+	RestoreStatus *RestoreStatus
+
+	// Size in bytes of the object.
+	Size *int64
+
+	// The class of storage used to store the object. Directory buckets - Only the S3
+	// Express One Zone storage class is supported by directory buckets to store
+	// objects.
+	StorageClass ObjectStorageClass
+
+	noSmithyDocumentSerde
+}
+
+// Object Identifier is a unique value to identify objects.
+type ObjectIdentifier struct {
+
+	// Key name of the object. Replacement must be made for object keys containing
+	// special characters (such as carriage returns) when using XML requests. For more
+	// information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
+	// .
+	//
+	// This member is required.
+	Key *string
+
+	// Version ID for the specific version of the object to delete. This
+	// functionality is not supported for directory buckets.
+	VersionId *string
+
+	noSmithyDocumentSerde
+}
+
+// The container element for Object Lock configuration parameters.
+type ObjectLockConfiguration struct {
+
+	// Indicates whether this bucket has an Object Lock configuration enabled. Enable
+	// ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket.
+	ObjectLockEnabled ObjectLockEnabled
+
+	// Specifies the Object Lock rule for the specified object. Enable this rule
+	// when you apply ObjectLockConfiguration to a bucket. Bucket settings require
+	// both a mode and a period. The period can be either Days or Years but you must
+	// select one. You cannot specify Days and Years at the same time.
+	Rule *ObjectLockRule
+
+	noSmithyDocumentSerde
+}
+
+// A legal hold configuration for an object.
+type ObjectLockLegalHold struct {
+
+	// Indicates whether the specified object has a legal hold in place.
+	Status ObjectLockLegalHoldStatus
+
+	noSmithyDocumentSerde
+}
+
+// A Retention configuration for an object.
+type ObjectLockRetention struct {
+
+	// Indicates the Retention mode for the specified object.
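+	// For example (illustrative sketch, assuming the standard library time package
+	// and the aws helper package), a 30-day governance-mode retention:
+	//
+	//	retention := types.ObjectLockRetention{
+	//		Mode:            types.ObjectLockRetentionModeGovernance,
+	//		RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
+	//	}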
+ Mode ObjectLockRetentionMode + + // The date on which this Object Lock Retention will expire. + RetainUntilDate *time.Time + + noSmithyDocumentSerde +} + +// The container element for an Object Lock rule. +type ObjectLockRule struct { + + // The default Object Lock retention mode and period that you want to apply to new + // objects placed in the specified bucket. Bucket settings require both a mode and + // a period. The period can be either Days or Years but you must select one. You + // cannot specify Days and Years at the same time. + DefaultRetention *DefaultRetention + + noSmithyDocumentSerde +} + +// A container for elements related to an individual part. +type ObjectPart struct { + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // The part number identifying the part. This value is a positive integer between + // 1 and 10,000. + PartNumber *int32 + + // The size of the uploaded part in bytes. + Size *int64 + + noSmithyDocumentSerde +} + +// The version of an object. +type ObjectVersion struct { + + // The algorithm that was used to create a checksum of the object. 
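+	// ObjectVersion values are typically read from a ListObjectVersions response
+	// rather than constructed. For example (illustrative sketch; "out" is an assumed
+	// *s3.ListObjectVersionsOutput, and fmt plus the aws helper package are
+	// assumed):
+	//
+	//	for _, v := range out.Versions {
+	//		fmt.Println(aws.ToString(v.Key), aws.ToString(v.VersionId), aws.ToBool(v.IsLatest))
+	//	}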
+ ChecksumAlgorithm []ChecksumAlgorithm + + // The entity tag is an MD5 hash of that version of the object. + ETag *string + + // Specifies whether the object is (true) or is not (false) the latest version of + // an object. + IsLatest *bool + + // The object key. + Key *string + + // Date and time when the object was last modified. + LastModified *time.Time + + // Specifies the owner of the object. + Owner *Owner + + // Specifies the restoration status of an object. Objects in certain storage + // classes must be restored before they can be retrieved. For more information + // about these storage classes and how to work with archived objects, see Working + // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) + // in the Amazon S3 User Guide. + RestoreStatus *RestoreStatus + + // Size in bytes of the object. + Size *int64 + + // The class of storage used to store the object. + StorageClass ObjectVersionStorageClass + + // Version ID of an object. + VersionId *string + + noSmithyDocumentSerde +} + +// Describes the location where the restore job's output is stored. +type OutputLocation struct { + + // Describes an S3 location that will receive the results of the restore request. + S3 *S3Location + + noSmithyDocumentSerde +} + +// Describes how results of the Select job are serialized. +type OutputSerialization struct { + + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput + + // Specifies JSON as request's output serialization format. + JSON *JSONOutput + + noSmithyDocumentSerde +} + +// Container for the owner's display name and ID. +type Owner struct { + + // Container for the display name of the owner. This value is only supported in + // the following Amazon Web Services Regions: + // - US East (N. Virginia) + // - US West (N. California) + // - US West (Oregon) + // - Asia Pacific (Singapore) + // - Asia Pacific (Sydney) + // - Asia Pacific (Tokyo) + // - Europe (Ireland) + // - South America (São Paulo) + // This functionality is not supported for directory buckets. + DisplayName *string + + // Container for the ID of the owner. + ID *string + + noSmithyDocumentSerde +} + +// The container element for a bucket's ownership controls. +type OwnershipControls struct { + + // The container element for an ownership control rule. + // + // This member is required. + Rules []OwnershipControlsRule + + noSmithyDocumentSerde +} + +// The container element for an ownership control rule. +type OwnershipControlsRule struct { + + // The container element for object ownership for a bucket's ownership controls. + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the + // bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. ObjectWriter - The uploading account will own the object if the + // object is uploaded with the bucket-owner-full-control canned ACL. + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. The bucket only accepts PUT requests that don't + // specify an ACL or specify bucket owner full control ACLs (such as the predefined + // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants + // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced + // and ACLs are disabled. 
We recommend keeping ACLs disabled, except in uncommon + // use cases where you must control access for each object individually. For more + // information about S3 Object Ownership, see Controlling ownership of objects and + // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. Directory buckets use the bucket owner enforced setting for S3 Object + // Ownership. + // + // This member is required. + ObjectOwnership ObjectOwnership + + noSmithyDocumentSerde +} + +// Container for Parquet. +type ParquetInput struct { + noSmithyDocumentSerde +} + +// Container for elements related to a part. +type Part struct { + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag returned when the part was uploaded. + ETag *string + + // Date and time at which the part was uploaded. + LastModified *time.Time + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int32 + + // Size in bytes of the uploaded part data. 
+ Size *int64 + + noSmithyDocumentSerde +} + +// Amazon S3 keys for log objects are partitioned in the following format: +// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// PartitionedPrefix defaults to EventTime delivery when server access logs are +// delivered. +type PartitionedPrefix struct { + + // Specifies the partition date source for the partitioned prefix. + // PartitionDateSource can be EventTime or DeliveryTime. + PartitionDateSource PartitionDateSource + + noSmithyDocumentSerde +} + +// The container element for a bucket's policy status. +type PolicyStatus struct { + + // The policy status for this bucket. TRUE indicates that this bucket is public. + // FALSE indicates that the bucket is not public. + IsPublic *bool + + noSmithyDocumentSerde +} + +// This data type contains information about progress of an operation. +type Progress struct { + + // The current number of uncompressed object bytes processed. + BytesProcessed *int64 + + // The current number of bytes of records payload data returned. + BytesReturned *int64 + + // The current number of object bytes scanned. + BytesScanned *int64 + + noSmithyDocumentSerde +} + +// This data type contains information about the progress event of an operation. +type ProgressEvent struct { + + // The Progress event details. + Details *Progress + + noSmithyDocumentSerde +} + +// The PublicAccessBlock configuration that you want to apply to this Amazon S3 +// bucket. You can enable the configuration options in any combination. For more +// information about when Amazon S3 considers a bucket or object public, see The +// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// in the Amazon S3 User Guide. +type PublicAccessBlockConfiguration struct { + + // Specifies whether Amazon S3 should block public access control lists (ACLs) for + // this bucket and objects in this bucket. Setting this element to TRUE causes the + // following behavior: + // - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is + // public. + // - PUT Object calls fail if the request includes a public ACL. + // - PUT Bucket calls fail if the request includes a public ACL. + // Enabling this setting doesn't affect existing policies or ACLs. + BlockPublicAcls *bool + + // Specifies whether Amazon S3 should block public bucket policies for this + // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT + // Bucket policy if the specified bucket policy allows public access. Enabling this + // setting doesn't affect existing bucket policies. + BlockPublicPolicy *bool + + // Specifies whether Amazon S3 should ignore public ACLs for this bucket and + // objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore + // all public ACLs on this bucket and objects in this bucket. Enabling this setting + // doesn't affect the persistence of any existing ACLs and doesn't prevent new + // public ACLs from being set. + IgnorePublicAcls *bool + + // Specifies whether Amazon S3 should restrict public bucket policies for this + // bucket. Setting this element to TRUE restricts access to this bucket to only + // Amazon Web Service principals and authorized users within this account if the + // bucket has a public policy. 
Enabling this setting doesn't affect previously + // stored bucket policies, except that public and cross-account access within any + // public bucket policy, including non-public delegation to specific accounts, is + // blocked. + RestrictPublicBuckets *bool + + noSmithyDocumentSerde +} + +// Specifies the configuration for publishing messages to an Amazon Simple Queue +// Service (Amazon SQS) queue when Amazon S3 detects specified events. +type QueueConfiguration struct { + + // A collection of bucket events for which to send notifications + // + // This member is required. + Events []Event + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // This member is required. + QueueArn *string + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) + // in the Amazon S3 User Guide. + Filter *NotificationConfigurationFilter + + // An optional unique identifier for configurations in a notification + // configuration. If you don't provide one, Amazon S3 will assign an ID. + Id *string + + noSmithyDocumentSerde +} + +// The container for the records event. +type RecordsEvent struct { + + // The byte array of partial, one or more result records. + Payload []byte + + noSmithyDocumentSerde +} + +// Specifies how requests are redirected. In the event of an error, you can +// specify a different error code to return. +type Redirect struct { + + // The host name to use in the redirect request. + HostName *string + + // The HTTP redirect code to use on the response. Not required if one of the + // siblings is present. + HttpRedirectCode *string + + // Protocol to use when redirecting requests. The default is the protocol that is + // used in the original request. + Protocol Protocol + + // The object key prefix to use in the redirect request. For example, to redirect + // requests for all pages with prefix docs/ (objects in the docs/ folder) to + // documents/ , you can set a condition block with KeyPrefixEquals set to docs/ + // and in the Redirect set ReplaceKeyPrefixWith to /documents . Not required if one + // of the siblings is present. Can be present only if ReplaceKeyWith is not + // provided. Replacement must be made for object keys containing special characters + // (such as carriage returns) when using XML requests. For more information, see + // XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) + // . + ReplaceKeyPrefixWith *string + + // The specific object key to use in the redirect request. For example, redirect + // request to error.html . Not required if one of the siblings is present. Can be + // present only if ReplaceKeyPrefixWith is not provided. Replacement must be made + // for object keys containing special characters (such as carriage returns) when + // using XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) + // . + ReplaceKeyWith *string + + noSmithyDocumentSerde +} + +// Specifies the redirect behavior of all requests to a website endpoint of an +// Amazon S3 bucket. 
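+// For example (illustrative sketch; the host name is a placeholder and the aws
+// helper package is assumed), a website configuration that redirects every
+// request to another host over HTTPS:
+//
+//	website := types.WebsiteConfiguration{
+//		RedirectAllRequestsTo: &types.RedirectAllRequestsTo{
+//			HostName: aws.String("example.com"),
+//			Protocol: types.ProtocolHttps,
+//		},
+//	}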
+type RedirectAllRequestsTo struct { + + // Name of the host where requests are redirected. + // + // This member is required. + HostName *string + + // Protocol to use when redirecting requests. The default is the protocol that is + // used in the original request. + Protocol Protocol + + noSmithyDocumentSerde +} + +// A filter that you can specify for selection for modifications on replicas. +// Amazon S3 doesn't replicate replica modifications by default. In the latest +// version of replication configuration (when Filter is specified), you can +// specify this element and set the status to Enabled to replicate modifications +// on replicas. If you don't specify the Filter element, Amazon S3 assumes that +// the replication configuration is the earlier version, V1. In the earlier +// version, this element is not allowed. +type ReplicaModifications struct { + + // Specifies whether Amazon S3 replicates modifications on replicas. + // + // This member is required. + Status ReplicaModificationsStatus + + noSmithyDocumentSerde +} + +// A container for replication rules. You can add up to 1,000 rules. The maximum +// size of a replication configuration is 2 MB. +type ReplicationConfiguration struct { + + // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role + // that Amazon S3 assumes when replicating objects. For more information, see How + // to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Role *string + + // A container for one or more replication rules. A replication configuration must + // have at least one rule and can contain a maximum of 1,000 rules. + // + // This member is required. + Rules []ReplicationRule + + noSmithyDocumentSerde +} + +// Specifies which Amazon S3 objects to replicate and where to store the replicas. +type ReplicationRule struct { + + // A container for information about the replication destination and its + // configurations including enabling the S3 Replication Time Control (S3 RTC). + // + // This member is required. + Destination *Destination + + // Specifies whether the rule is enabled. + // + // This member is required. + Status ReplicationRuleStatus + + // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter + // in your replication configuration, you must also include a + // DeleteMarkerReplication element. If your Filter includes a Tag element, the + // DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does + // not support replicating delete markers for tag-based rules. For an example + // configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config) + // . For more information about delete marker replication, see Basic Rule + // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html) + // . If you are using an earlier version of the replication configuration, Amazon + // S3 handles replication of delete markers differently. For more information, see + // Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) + // . + DeleteMarkerReplication *DeleteMarkerReplication + + // Optional configuration to replicate existing source bucket objects. 
For more + // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) + // in the Amazon S3 User Guide. + ExistingObjectReplication *ExistingObjectReplication + + // A filter that identifies the subset of objects to which the replication rule + // applies. A Filter must specify exactly one Prefix , Tag , or an And child + // element. + Filter ReplicationRuleFilter + + // A unique identifier for the rule. The maximum value is 255 characters. + ID *string + + // An object key name prefix that identifies the object or objects to which the + // rule applies. The maximum prefix length is 1,024 characters. To include all + // objects in a bucket, specify an empty string. Replacement must be made for + // object keys containing special characters (such as carriage returns) when using + // XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) + // . + // + // Deprecated: This member has been deprecated. + Prefix *string + + // The priority indicates which rule has precedence whenever two or more + // replication rules conflict. Amazon S3 will attempt to replicate objects + // according to all replication rules. However, if there are two or more rules with + // the same destination bucket, then objects will be replicated according to the + // rule with the highest priority. The higher the number, the higher the priority. + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) + // in the Amazon S3 User Guide. + Priority *int32 + + // A container that describes additional filters for identifying the source + // objects that you want to replicate. You can choose to enable or disable the + // replication of these objects. Currently, Amazon S3 supports only the filter that + // you can specify for objects created with server-side encryption using a customer + // managed key stored in Amazon Web Services Key Management Service (SSE-KMS). + SourceSelectionCriteria *SourceSelectionCriteria + + noSmithyDocumentSerde +} + +// A container for specifying rule filters. The filters determine the subset of +// objects to which the rule applies. This element is required only if you specify +// more than one filter. For example: +// - If you specify both a Prefix and a Tag filter, wrap these filters in an And +// tag. +// - If you specify a filter based on multiple tags, wrap the Tag elements in an +// And tag. +type ReplicationRuleAndOperator struct { + + // An object key name prefix that identifies the subset of objects to which the + // rule applies. + Prefix *string + + // An array of tags containing key and value pairs. + Tags []Tag + + noSmithyDocumentSerde +} + +// A filter that identifies the subset of objects to which the replication rule +// applies. A Filter must specify exactly one Prefix , Tag , or an And child +// element. +// +// The following types satisfy this interface: +// +// ReplicationRuleFilterMemberAnd +// ReplicationRuleFilterMemberPrefix +// ReplicationRuleFilterMemberTag +type ReplicationRuleFilter interface { + isReplicationRuleFilter() +} + +// A container for specifying rule filters. The filters determine the subset of +// objects to which the rule applies. This element is required only if you specify +// more than one filter. 
For example: +// - If you specify both a Prefix and a Tag filter, wrap these filters in an And +// tag. +// - If you specify a filter based on multiple tags, wrap the Tag elements in an +// And tag. +type ReplicationRuleFilterMemberAnd struct { + Value ReplicationRuleAndOperator + + noSmithyDocumentSerde +} + +func (*ReplicationRuleFilterMemberAnd) isReplicationRuleFilter() {} + +// An object key name prefix that identifies the subset of objects to which the +// rule applies. Replacement must be made for object keys containing special +// characters (such as carriage returns) when using XML requests. For more +// information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) +// . +type ReplicationRuleFilterMemberPrefix struct { + Value string + + noSmithyDocumentSerde +} + +func (*ReplicationRuleFilterMemberPrefix) isReplicationRuleFilter() {} + +// A container for specifying a tag key and value. The rule applies only to +// objects that have the tag in their tag set. +type ReplicationRuleFilterMemberTag struct { + Value Tag + + noSmithyDocumentSerde +} + +func (*ReplicationRuleFilterMemberTag) isReplicationRuleFilter() {} + +// A container specifying S3 Replication Time Control (S3 RTC) related +// information, including whether S3 RTC is enabled and the time when all objects +// and operations on objects must be replicated. Must be specified together with a +// Metrics block. +type ReplicationTime struct { + + // Specifies whether the replication time is enabled. + // + // This member is required. + Status ReplicationTimeStatus + + // A container specifying the time by which replication should be complete for all + // objects and operations on objects. + // + // This member is required. + Time *ReplicationTimeValue + + noSmithyDocumentSerde +} + +// A container specifying the time value for S3 Replication Time Control (S3 RTC) +// and replication metrics EventThreshold . +type ReplicationTimeValue struct { + + // Contains an integer specifying time in minutes. Valid value: 15 + Minutes *int32 + + noSmithyDocumentSerde +} + +// Container for Payer. +type RequestPaymentConfiguration struct { + + // Specifies who pays for the download and request fees. + // + // This member is required. + Payer Payer + + noSmithyDocumentSerde +} + +// Container for specifying if periodic QueryProgress messages should be sent. +type RequestProgress struct { + + // Specifies whether periodic QueryProgress frames should be sent. Valid values: + // TRUE, FALSE. Default value: FALSE. + Enabled *bool + + noSmithyDocumentSerde +} + +// Container for restore job parameters. +type RestoreRequest struct { + + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation . The Days element is required for regular restores, and must not + // be provided for select requests. + Days *int32 + + // The optional description for the job. + Description *string + + // S3 Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation . + GlacierJobParameters *GlacierJobParameters + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation + + // Describes the parameters for Select job types. + SelectParameters *SelectParameters + + // Retrieval tier at which the restore will be processed. + Tier Tier + + // Type of restore request. 
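+	// For example (illustrative sketch; the aws helper package is assumed), a plain
+	// archive retrieval that leaves Type unset and restores a copy for ten days at
+	// the bulk tier:
+	//
+	//	req := types.RestoreRequest{
+	//		Days: aws.Int32(10),
+	//		GlacierJobParameters: &types.GlacierJobParameters{
+	//			Tier: types.TierBulk,
+	//		},
+	//	}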
+ Type RestoreRequestType + + noSmithyDocumentSerde +} + +// Specifies the restoration status of an object. Objects in certain storage +// classes must be restored before they can be retrieved. For more information +// about these storage classes and how to work with archived objects, see Working +// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) +// in the Amazon S3 User Guide. This functionality is not supported for directory +// buckets. Only the S3 Express One Zone storage class is supported by directory +// buckets to store objects. +type RestoreStatus struct { + + // Specifies whether the object is currently being restored. If the object + // restoration is in progress, the header returns the value TRUE . For example: + // x-amz-optional-object-attributes: IsRestoreInProgress="true" If the object + // restoration has completed, the header returns the value FALSE . For example: + // x-amz-optional-object-attributes: IsRestoreInProgress="false", + // RestoreExpiryDate="2012-12-21T00:00:00.000Z" If the object hasn't been restored, + // there is no header response. + IsRestoreInProgress *bool + + // Indicates when the restored copy will expire. This value is populated only if + // the object has already been restored. For example: + // x-amz-optional-object-attributes: IsRestoreInProgress="false", + // RestoreExpiryDate="2012-12-21T00:00:00.000Z" + RestoreExpiryDate *time.Time + + noSmithyDocumentSerde +} + +// Specifies the redirect behavior and when a redirect is applied. For more +// information about routing rules, see Configuring advanced conditional redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) +// in the Amazon S3 User Guide. +type RoutingRule struct { + + // Container for redirect information. You can redirect requests to another host, + // to another page, or with another protocol. In the event of an error, you can + // specify a different error code to return. + // + // This member is required. + Redirect *Redirect + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition + + noSmithyDocumentSerde +} + +// A container for object key name prefix and suffix filtering rules. +type S3KeyFilter struct { + + // A list of containers for the key-value pair that defines the criteria for the + // filter rule. + FilterRules []FilterRule + + noSmithyDocumentSerde +} + +// Describes an Amazon S3 location that will receive the results of the restore +// request. +type S3Location struct { + + // The name of the bucket where the restore results will be placed. + // + // This member is required. + BucketName *string + + // The prefix that is prepended to the restore results for this request. + // + // This member is required. + Prefix *string + + // A list of grants that control access to the staged results. + AccessControlList []Grant + + // The canned ACL to apply to the restore results. + CannedACL ObjectCannedACL + + // Contains the type of server-side encryption used. + Encryption *Encryption + + // The class of storage used to store the restore results. + StorageClass StorageClass + + // The tag-set that is applied to the restore results. 
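+	// For example (illustrative sketch; the bucket name, prefix, and tag values are
+	// placeholders), an output location whose results carry a single tag:
+	//
+	//	loc := types.S3Location{
+	//		BucketName: aws.String("example-results-bucket"),
+	//		Prefix:     aws.String("restore-output/"),
+	//		Tagging: &types.Tagging{
+	//			TagSet: []types.Tag{{Key: aws.String("job"), Value: aws.String("restore")}},
+	//		},
+	//	}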
+ Tagging *Tagging + + // A list of metadata to store with the restore results in S3. + UserMetadata []MetadataEntry + + noSmithyDocumentSerde +} + +// Specifies the byte range of the object to get the records from. A record is +// processed when its first byte is contained by the range. This parameter is +// optional, but when specified, it must not be empty. See RFC 2616, Section +// 14.35.1 about how to specify the start and end of the range. +type ScanRange struct { + + // Specifies the end of the byte range. This parameter is optional. Valid values: + // non-negative integers. The default value is one less than the size of the object + // being queried. If only the End parameter is supplied, it is interpreted to mean + // scan the last N bytes of the file. For example, 50 means scan the last 50 bytes. + End *int64 + + // Specifies the start of the byte range. This parameter is optional. Valid + // values: non-negative integers. The default value is 0. If only start is + // supplied, it means scan from that point to the end of the file. For example, 50 + // means scan from byte 50 until the end of the file. + Start *int64 + + noSmithyDocumentSerde +} + +// The container for selecting objects from a content event stream. +// +// The following types satisfy this interface: +// +// SelectObjectContentEventStreamMemberCont +// SelectObjectContentEventStreamMemberEnd +// SelectObjectContentEventStreamMemberProgress +// SelectObjectContentEventStreamMemberRecords +// SelectObjectContentEventStreamMemberStats +type SelectObjectContentEventStream interface { + isSelectObjectContentEventStream() +} + +// The Continuation Event. +type SelectObjectContentEventStreamMemberCont struct { + Value ContinuationEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberCont) isSelectObjectContentEventStream() {} + +// The End Event. +type SelectObjectContentEventStreamMemberEnd struct { + Value EndEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberEnd) isSelectObjectContentEventStream() {} + +// The Progress Event. +type SelectObjectContentEventStreamMemberProgress struct { + Value ProgressEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberProgress) isSelectObjectContentEventStream() {} + +// The Records Event. +type SelectObjectContentEventStreamMemberRecords struct { + Value RecordsEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberRecords) isSelectObjectContentEventStream() {} + +// The Stats Event. +type SelectObjectContentEventStreamMemberStats struct { + Value StatsEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberStats) isSelectObjectContentEventStream() {} + +// Describes the parameters for Select job types. +type SelectParameters struct { + + // The expression that is used to query the object. + // + // This member is required. + Expression *string + + // The type of the provided expression (for example, SQL). + // + // This member is required. + ExpressionType ExpressionType + + // Describes the serialization format of the object. + // + // This member is required. + InputSerialization *InputSerialization + + // Describes how the results of the Select job are serialized. + // + // This member is required. + OutputSerialization *OutputSerialization + + noSmithyDocumentSerde +} + +// Describes the default server-side encryption to apply to new objects in the +// bucket. 
If a PUT Object request doesn't specify any server-side encryption, this +// default encryption will be applied. If you don't specify a customer managed key +// at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key +// in your Amazon Web Services account the first time that you add an object +// encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for +// SSE-KMS. For more information, see PUT Bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon S3 API Reference. +type ServerSideEncryptionByDefault struct { + + // Server-side encryption algorithm to use for the default encryption. + // + // This member is required. + SSEAlgorithm ServerSideEncryption + + // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services + // KMS key ID to use for the default encryption. This parameter is allowed if and + // only if SSEAlgorithm is set to aws:kms or aws:kms:dsse . You can specify the key + // ID, key alias, or the Amazon Resource Name (ARN) of the KMS key. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Key Alias: alias/alias-name + // If you use a key ID, you can run into a LogDestination undeliverable error when + // creating a VPC flow log. If you are using encryption with cross-account or + // Amazon Web Services service operations you must use a fully qualified KMS key + // ARN. For more information, see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) + // . Amazon S3 only supports symmetric encryption KMS keys. For more information, + // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the Amazon Web Services Key Management Service Developer Guide. + KMSMasterKeyID *string + + noSmithyDocumentSerde +} + +// Specifies the default server-side-encryption configuration. +type ServerSideEncryptionConfiguration struct { + + // Container for information about a particular server-side encryption + // configuration rule. + // + // This member is required. + Rules []ServerSideEncryptionRule + + noSmithyDocumentSerde +} + +// Specifies the default server-side encryption configuration. +type ServerSideEncryptionRule struct { + + // Specifies the default server-side encryption to apply to new objects in the + // bucket. If a PUT Object request doesn't specify any server-side encryption, this + // default encryption will be applied. + ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault + + // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side + // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects + // are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 + // to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. For more + // information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon S3 User Guide. + BucketKeyEnabled *bool + + noSmithyDocumentSerde +} + +// The established temporary security credentials of the session. Directory +// buckets - These session credentials are only supported for the authentication +// and authorization of Zonal endpoint APIs on directory buckets. 
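
A usage sketch for the default-encryption types above, assuming a configured *s3.Client; the bucket name and key alias are hypothetical. Setting BucketKeyEnabled opts in to S3 Bucket Keys, which reduces KMS request costs for SSE-KMS buckets:

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableDefaultEncryption applies SSE-KMS with an S3 Bucket Key as the
// bucket default, so PUTs that omit encryption headers are still encrypted.
func enableDefaultEncryption(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
					SSEAlgorithm:   types.ServerSideEncryptionAwsKms,
					KMSMasterKeyID: aws.String("alias/example-key"), // hypothetical alias
				},
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	return err
}
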
+type SessionCredentials struct { + + // A unique identifier that's associated with a secret access key. The access key + // ID and the secret access key are used together to sign programmatic Amazon Web + // Services requests cryptographically. + // + // This member is required. + AccessKeyId *string + + // Temporary security credentials expire after a specified interval. After + // temporary credentials expire, any calls that you make with those credentials + // will fail. So you must generate a new set of temporary credentials. Temporary + // credentials cannot be extended or refreshed beyond the original specified + // interval. + // + // This member is required. + Expiration *time.Time + + // A key that's used with the access key ID to cryptographically sign programmatic + // Amazon Web Services requests. Signing a request identifies the sender and + // prevents the request from being altered. + // + // This member is required. + SecretAccessKey *string + + // A part of the temporary security credentials. The session token is used to + // validate the temporary security credentials. + // + // This member is required. + SessionToken *string + + noSmithyDocumentSerde +} + +// To use simple format for S3 keys for log objects, set SimplePrefix to an empty +// object. [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +type SimplePrefix struct { + noSmithyDocumentSerde +} + +// A container that describes additional filters for identifying the source +// objects that you want to replicate. You can choose to enable or disable the +// replication of these objects. Currently, Amazon S3 supports only the filter that +// you can specify for objects created with server-side encryption using a customer +// managed key stored in Amazon Web Services Key Management Service (SSE-KMS). +type SourceSelectionCriteria struct { + + // A filter that you can specify for selections for modifications on replicas. + // Amazon S3 doesn't replicate replica modifications by default. In the latest + // version of replication configuration (when Filter is specified), you can + // specify this element and set the status to Enabled to replicate modifications + // on replicas. If you don't specify the Filter element, Amazon S3 assumes that + // the replication configuration is the earlier version, V1. In the earlier + // version, this element is not allowed + ReplicaModifications *ReplicaModifications + + // A container for filter information for the selection of Amazon S3 objects + // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria + // in the replication configuration, this element is required. + SseKmsEncryptedObjects *SseKmsEncryptedObjects + + noSmithyDocumentSerde +} + +// Specifies the use of SSE-KMS to encrypt delivered inventory reports. +type SSEKMS struct { + + // Specifies the ID of the Key Management Service (KMS) symmetric encryption + // customer managed key to use for encrypting inventory reports. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +// A container for filter information for the selection of S3 objects encrypted +// with Amazon Web Services KMS. +type SseKmsEncryptedObjects struct { + + // Specifies whether Amazon S3 replicates objects created with server-side + // encryption using an Amazon Web Services KMS key stored in Amazon Web Services + // Key Management Service. + // + // This member is required. 
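
A sketch of how SessionCredentials is surfaced, assuming a configured *s3.Client; the directory bucket name is hypothetical. The SDK's directory-bucket support normally creates and refreshes these sessions automatically, so calling CreateSession by hand is rarely necessary:

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// createDirectoryBucketSession obtains the temporary credentials that back
// Zonal (directory bucket) operations and prints their expiry.
func createDirectoryBucketSession(ctx context.Context, client *s3.Client) error {
	out, err := client.CreateSession(ctx, &s3.CreateSessionInput{
		Bucket: aws.String("example--use1-az4--x-s3"), // hypothetical directory bucket
	})
	if err != nil {
		return err
	}
	creds := out.Credentials // *types.SessionCredentials
	fmt.Println("session key:", aws.ToString(creds.AccessKeyId), "expires:", creds.Expiration)
	return nil
}
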
+ Status SseKmsEncryptedObjectsStatus + + noSmithyDocumentSerde +} + +// Specifies the use of SSE-S3 to encrypt delivered inventory reports. +type SSES3 struct { + noSmithyDocumentSerde +} + +// Container for the stats details. +type Stats struct { + + // The total number of uncompressed object bytes processed. + BytesProcessed *int64 + + // The total number of bytes of records payload data returned. + BytesReturned *int64 + + // The total number of object bytes scanned. + BytesScanned *int64 + + noSmithyDocumentSerde +} + +// Container for the Stats Event. +type StatsEvent struct { + + // The Stats event details. + Details *Stats + + noSmithyDocumentSerde +} + +// Specifies data related to access patterns to be collected and made available to +// analyze the tradeoffs between different storage classes for an Amazon S3 bucket. +type StorageClassAnalysis struct { + + // Specifies how data related to the storage class analysis for an Amazon S3 + // bucket should be exported. + DataExport *StorageClassAnalysisDataExport + + noSmithyDocumentSerde +} + +// Container for data related to the storage class analysis for an Amazon S3 +// bucket for export. +type StorageClassAnalysisDataExport struct { + + // The place to store the data for an analysis. + // + // This member is required. + Destination *AnalyticsExportDestination + + // The version of the output schema to use when exporting data. Must be V_1 . + // + // This member is required. + OutputSchemaVersion StorageClassAnalysisSchemaVersion + + noSmithyDocumentSerde +} + +// A container of a key value name pair. +type Tag struct { + + // Name of the object key. + // + // This member is required. + Key *string + + // Value of the tag. + // + // This member is required. + Value *string + + noSmithyDocumentSerde +} + +// Container for TagSet elements. +type Tagging struct { + + // A collection for a set of tags + // + // This member is required. + TagSet []Tag + + noSmithyDocumentSerde +} + +// Container for granting information. Buckets that use the bucket owner enforced +// setting for Object Ownership don't support target grants. For more information, +// see Permissions server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// in the Amazon S3 User Guide. +type TargetGrant struct { + + // Container for the person being granted permissions. + Grantee *Grantee + + // Logging permissions assigned to the grantee for the bucket. + Permission BucketLogsPermission + + noSmithyDocumentSerde +} + +// Amazon S3 key format for log objects. Only one format, PartitionedPrefix or +// SimplePrefix, is allowed. +type TargetObjectKeyFormat struct { + + // Partitioned S3 key for log objects. + PartitionedPrefix *PartitionedPrefix + + // To use the simple format for S3 keys for log objects. To specify SimplePrefix + // format, set SimplePrefix to {}. + SimplePrefix *SimplePrefix + + noSmithyDocumentSerde +} + +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, +// without additional operational overhead. +type Tiering struct { + + // S3 Intelligent-Tiering access tier. See Storage class for automatically + // optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) + // for a list of access tiers in the S3 Intelligent-Tiering storage class. 
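
A usage sketch for Tag and Tagging, assuming a configured *s3.Client; the bucket, key, and tag values are hypothetical. Note that PutObjectTagging replaces the object's whole tag-set rather than merging with it:

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// tagObject replaces the tag-set of an object with a single key/value pair.
func tagObject(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
		Bucket: aws.String("example-bucket"),    // hypothetical bucket
		Key:    aws.String("reports/2024.csv"),  // hypothetical key
		Tagging: &types.Tagging{TagSet: []types.Tag{
			{Key: aws.String("team"), Value: aws.String("data")},
		}},
	})
	return err
}
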
+ // + // This member is required. + AccessTier IntelligentTieringAccessTier + + // The number of consecutive days of no access after which an object will be + // eligible to be transitioned to the corresponding tier. The minimum number of + // days specified for Archive Access tier must be at least 90 days and Deep Archive + // Access tier must be at least 180 days. The maximum can be up to 2 years (730 + // days). + // + // This member is required. + Days *int32 + + noSmithyDocumentSerde +} + +// A container for specifying the configuration for publication of messages to an +// Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects +// specified events. +type TopicConfiguration struct { + + // The Amazon S3 bucket event about which to send notifications. For more + // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Events []Event + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // This member is required. + TopicArn *string + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) + // in the Amazon S3 User Guide. + Filter *NotificationConfigurationFilter + + // An optional unique identifier for configurations in a notification + // configuration. If you don't provide one, Amazon S3 will assign an ID. + Id *string + + noSmithyDocumentSerde +} + +// Specifies when an object transitions to a specified storage class. For more +// information about Amazon S3 lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon S3 User Guide. +type Transition struct { + + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. + Date *time.Time + + // Indicates the number of days after creation when objects are transitioned to + // the specified storage class. The value must be a positive integer. + Days *int32 + + // The storage class to which you want the object to transition. + StorageClass TransitionStorageClass + + noSmithyDocumentSerde +} + +// Describes the versioning state of an Amazon S3 bucket. For more information, +// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// in the Amazon S3 API Reference. +type VersioningConfiguration struct { + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA delete. + // If the bucket has never been so configured, this element is not returned. + MFADelete MFADelete + + // The versioning state of the bucket. + Status BucketVersioningStatus + + noSmithyDocumentSerde +} + +// Specifies website configuration parameters for an Amazon S3 bucket. +type WebsiteConfiguration struct { + + // The name of the error document for the website. + ErrorDocument *ErrorDocument + + // The name of the index document for the website. 
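
A usage sketch for Transition inside a lifecycle rule, assuming a configured *s3.Client and a hypothetical bucket and prefix. In this SDK version LifecycleRuleFilter is a union interface (see the UnknownUnionMember plumbing below), so the filter is supplied through a Member wrapper type:

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// transitionLogsToGlacier moves objects under logs/ to the Glacier storage
// class 30 days after creation.
func transitionLogsToGlacier(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketLifecycleConfiguration(ctx, &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		LifecycleConfiguration: &types.BucketLifecycleConfiguration{
			Rules: []types.LifecycleRule{{
				ID:     aws.String("archive-logs"),
				Status: types.ExpirationStatusEnabled,
				Filter: &types.LifecycleRuleFilterMemberPrefix{Value: "logs/"},
				Transitions: []types.Transition{{
					Days:         aws.Int32(30),
					StorageClass: types.TransitionStorageClassGlacier,
				}},
			}},
		},
	})
	return err
}
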
+ IndexDocument *IndexDocument + + // The redirect behavior for every request to this bucket's website endpoint. If + // you specify this property, you can't specify any other property. + RedirectAllRequestsTo *RedirectAllRequestsTo + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []RoutingRule + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +// UnknownUnionMember is returned when a union member is returned over the wire, +// but has an unknown tag. +type UnknownUnionMember struct { + Tag string + Value []byte + + noSmithyDocumentSerde +} + +func (*UnknownUnionMember) isAnalyticsFilter() {} +func (*UnknownUnionMember) isLifecycleRuleFilter() {} +func (*UnknownUnionMember) isMetricsFilter() {} +func (*UnknownUnionMember) isReplicationRuleFilter() {} +func (*UnknownUnionMember) isSelectObjectContentEventStream() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go new file mode 100644 index 000000000..e954b302d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go @@ -0,0 +1,5545 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpAbortMultipartUpload struct { +} + +func (*validateOpAbortMultipartUpload) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAbortMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AbortMultipartUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAbortMultipartUploadInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCompleteMultipartUpload struct { +} + +func (*validateOpCompleteMultipartUpload) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCompleteMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CompleteMultipartUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCompleteMultipartUploadInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCopyObject struct { +} + +func (*validateOpCopyObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCopyObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CopyObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCopyObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateBucket struct { +} + +func (*validateOpCreateBucket) ID() string { + return "OperationInputValidation" 
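
The validator types that follow all share one shape: an Initialize-step middleware with the ID OperationInputValidation that type-asserts the operation input and runs the matching validateOp...Input helper before the request is serialized, so missing required members fail fast as a smithy InvalidParamsError without any HTTP call. A custom middleware with the same shape can be attached through APIOptions; a minimal sketch, assuming a loaded aws.Config (the middleware ID and print statement are illustrative, not part of this patch):

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go/middleware"
)

// newClientWithAudit attaches a custom Initialize-step middleware to every
// operation, mirroring the generated validators: it runs before
// serialization, may inspect in.Parameters, and can short-circuit by
// returning an error instead of calling next.
func newClientWithAudit(cfg aws.Config) *s3.Client {
	audit := middleware.InitializeMiddlewareFunc("OperationAudit",
		func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
			middleware.InitializeOutput, middleware.Metadata, error,
		) {
			fmt.Printf("calling with input %T\n", in.Parameters)
			return next.HandleInitialize(ctx, in)
		})
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
			return stack.Initialize.Add(audit, middleware.Before)
		})
	})
}
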
+} + +func (m *validateOpCreateBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateBucketInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateBucketInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateMultipartUpload struct { +} + +func (*validateOpCreateMultipartUpload) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateMultipartUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateMultipartUploadInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateSession struct { +} + +func (*validateOpCreateSession) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateSession) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateSessionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateSessionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketAnalyticsConfiguration struct { +} + +func (*validateOpDeleteBucketAnalyticsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketAnalyticsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketAnalyticsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketCors struct { +} + +func (*validateOpDeleteBucketCors) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketCorsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketCorsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketEncryption struct { +} + +func (*validateOpDeleteBucketEncryption) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( 
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketEncryptionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketEncryptionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucket struct { +} + +func (*validateOpDeleteBucket) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketIntelligentTieringConfiguration struct { +} + +func (*validateOpDeleteBucketIntelligentTieringConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketIntelligentTieringConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketIntelligentTieringConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketInventoryConfiguration struct { +} + +func (*validateOpDeleteBucketInventoryConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketInventoryConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketInventoryConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketLifecycle struct { +} + +func (*validateOpDeleteBucketLifecycle) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketLifecycle) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketLifecycleInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketLifecycleInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketMetricsConfiguration struct { +} + +func (*validateOpDeleteBucketMetricsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, 
next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketMetricsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketMetricsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketOwnershipControls struct { +} + +func (*validateOpDeleteBucketOwnershipControls) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketOwnershipControlsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketOwnershipControlsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketPolicy struct { +} + +func (*validateOpDeleteBucketPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketReplication struct { +} + +func (*validateOpDeleteBucketReplication) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketReplicationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketReplicationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketTagging struct { +} + +func (*validateOpDeleteBucketTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketWebsite struct { +} + +func (*validateOpDeleteBucketWebsite) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata 
middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketWebsiteInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketWebsiteInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteObject struct { +} + +func (*validateOpDeleteObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteObjects struct { +} + +func (*validateOpDeleteObjects) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteObjectsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteObjectsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteObjectTagging struct { +} + +func (*validateOpDeleteObjectTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteObjectTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteObjectTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeletePublicAccessBlock struct { +} + +func (*validateOpDeletePublicAccessBlock) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeletePublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeletePublicAccessBlockInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeletePublicAccessBlockInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketAccelerateConfiguration struct { +} + +func (*validateOpGetBucketAccelerateConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type 
%T", in.Parameters) + } + if err := validateOpGetBucketAccelerateConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketAcl struct { +} + +func (*validateOpGetBucketAcl) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketAclInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketAclInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketAnalyticsConfiguration struct { +} + +func (*validateOpGetBucketAnalyticsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketAnalyticsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketCors struct { +} + +func (*validateOpGetBucketCors) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketCorsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketCorsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketEncryption struct { +} + +func (*validateOpGetBucketEncryption) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketEncryptionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketEncryptionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketIntelligentTieringConfiguration struct { +} + +func (*validateOpGetBucketIntelligentTieringConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := 
validateOpGetBucketIntelligentTieringConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketInventoryConfiguration struct { +} + +func (*validateOpGetBucketInventoryConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketInventoryConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketLifecycleConfiguration struct { +} + +func (*validateOpGetBucketLifecycleConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketLifecycleConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketLocation struct { +} + +func (*validateOpGetBucketLocation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketLocation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketLocationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketLocationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketLogging struct { +} + +func (*validateOpGetBucketLogging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketLoggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketLoggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketMetricsConfiguration struct { +} + +func (*validateOpGetBucketMetricsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := 
validateOpGetBucketMetricsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketNotificationConfiguration struct { +} + +func (*validateOpGetBucketNotificationConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketNotificationConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketOwnershipControls struct { +} + +func (*validateOpGetBucketOwnershipControls) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketOwnershipControlsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketOwnershipControlsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketPolicy struct { +} + +func (*validateOpGetBucketPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketPolicyStatus struct { +} + +func (*validateOpGetBucketPolicyStatus) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketPolicyStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketPolicyStatusInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketPolicyStatusInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketReplication struct { +} + +func (*validateOpGetBucketReplication) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketReplicationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketReplicationInput(input); err != nil { + return 
out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketRequestPayment struct { +} + +func (*validateOpGetBucketRequestPayment) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketRequestPayment) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketRequestPaymentInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketRequestPaymentInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketTagging struct { +} + +func (*validateOpGetBucketTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketVersioning struct { +} + +func (*validateOpGetBucketVersioning) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketVersioningInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketVersioningInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketWebsite struct { +} + +func (*validateOpGetBucketWebsite) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketWebsiteInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketWebsiteInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectAcl struct { +} + +func (*validateOpGetObjectAcl) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectAclInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectAclInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectAttributes struct { +} + +func (*validateOpGetObjectAttributes) ID() string { + return "OperationInputValidation" +} + +func (m 
*validateOpGetObjectAttributes) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectAttributesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectAttributesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObject struct { +} + +func (*validateOpGetObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectLegalHold struct { +} + +func (*validateOpGetObjectLegalHold) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectLegalHoldInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectLegalHoldInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectLockConfiguration struct { +} + +func (*validateOpGetObjectLockConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectLockConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectLockConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectRetention struct { +} + +func (*validateOpGetObjectRetention) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectRetentionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectRetentionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectTagging struct { +} + +func (*validateOpGetObjectTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, 
+) { + input, ok := in.Parameters.(*GetObjectTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectTorrent struct { +} + +func (*validateOpGetObjectTorrent) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectTorrent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectTorrentInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectTorrentInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetPublicAccessBlock struct { +} + +func (*validateOpGetPublicAccessBlock) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetPublicAccessBlockInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetPublicAccessBlockInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpHeadBucket struct { +} + +func (*validateOpHeadBucket) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpHeadBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*HeadBucketInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpHeadBucketInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpHeadObject struct { +} + +func (*validateOpHeadObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpHeadObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*HeadObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpHeadObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListBucketAnalyticsConfigurations struct { +} + +func (*validateOpListBucketAnalyticsConfigurations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListBucketAnalyticsConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListBucketAnalyticsConfigurationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := 
validateOpListBucketAnalyticsConfigurationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListBucketIntelligentTieringConfigurations struct { +} + +func (*validateOpListBucketIntelligentTieringConfigurations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListBucketIntelligentTieringConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListBucketIntelligentTieringConfigurationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListBucketIntelligentTieringConfigurationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListBucketInventoryConfigurations struct { +} + +func (*validateOpListBucketInventoryConfigurations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListBucketInventoryConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListBucketInventoryConfigurationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListBucketInventoryConfigurationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListBucketMetricsConfigurations struct { +} + +func (*validateOpListBucketMetricsConfigurations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListBucketMetricsConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListBucketMetricsConfigurationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListBucketMetricsConfigurationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListMultipartUploads struct { +} + +func (*validateOpListMultipartUploads) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListMultipartUploads) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListMultipartUploadsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListMultipartUploadsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListObjects struct { +} + +func (*validateOpListObjects) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListObjectsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input 
parameters type %T", in.Parameters) + } + if err := validateOpListObjectsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListObjectsV2 struct { +} + +func (*validateOpListObjectsV2) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListObjectsV2) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListObjectsV2Input) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListObjectsV2Input(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListObjectVersions struct { +} + +func (*validateOpListObjectVersions) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListObjectVersions) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListObjectVersionsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListObjectVersionsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListParts struct { +} + +func (*validateOpListParts) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListParts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListPartsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListPartsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketAccelerateConfiguration struct { +} + +func (*validateOpPutBucketAccelerateConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketAccelerateConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketAccelerateConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketAcl struct { +} + +func (*validateOpPutBucketAcl) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketAclInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketAclInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketAnalyticsConfiguration struct { 
+} + +func (*validateOpPutBucketAnalyticsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketAnalyticsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketAnalyticsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketCors struct { +} + +func (*validateOpPutBucketCors) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketCorsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketCorsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketEncryption struct { +} + +func (*validateOpPutBucketEncryption) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketEncryptionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketEncryptionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketIntelligentTieringConfiguration struct { +} + +func (*validateOpPutBucketIntelligentTieringConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketIntelligentTieringConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketIntelligentTieringConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketInventoryConfiguration struct { +} + +func (*validateOpPutBucketInventoryConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketInventoryConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketInventoryConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type 
validateOpPutBucketLifecycleConfiguration struct { +} + +func (*validateOpPutBucketLifecycleConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketLifecycleConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketLifecycleConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketLogging struct { +} + +func (*validateOpPutBucketLogging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketLoggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketLoggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketMetricsConfiguration struct { +} + +func (*validateOpPutBucketMetricsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketMetricsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketMetricsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketNotificationConfiguration struct { +} + +func (*validateOpPutBucketNotificationConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketNotificationConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketNotificationConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketOwnershipControls struct { +} + +func (*validateOpPutBucketOwnershipControls) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketOwnershipControlsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketOwnershipControlsInput(input); err != nil { + return out, metadata, err + } + return 
next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketPolicy struct { +} + +func (*validateOpPutBucketPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketReplication struct { +} + +func (*validateOpPutBucketReplication) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketReplicationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketReplicationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketRequestPayment struct { +} + +func (*validateOpPutBucketRequestPayment) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketRequestPayment) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketRequestPaymentInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketRequestPaymentInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketTagging struct { +} + +func (*validateOpPutBucketTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketVersioning struct { +} + +func (*validateOpPutBucketVersioning) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketVersioningInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketVersioningInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketWebsite struct { +} + +func (*validateOpPutBucketWebsite) ID() string { + return "OperationInputValidation" +} + +func (m 
*validateOpPutBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketWebsiteInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketWebsiteInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectAcl struct { +} + +func (*validateOpPutObjectAcl) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectAclInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectAclInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObject struct { +} + +func (*validateOpPutObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectLegalHold struct { +} + +func (*validateOpPutObjectLegalHold) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectLegalHoldInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectLegalHoldInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectLockConfiguration struct { +} + +func (*validateOpPutObjectLockConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectLockConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectLockConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectRetention struct { +} + +func (*validateOpPutObjectRetention) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := 
in.Parameters.(*PutObjectRetentionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectRetentionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectTagging struct { +} + +func (*validateOpPutObjectTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutPublicAccessBlock struct { +} + +func (*validateOpPutPublicAccessBlock) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutPublicAccessBlockInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutPublicAccessBlockInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRestoreObject struct { +} + +func (*validateOpRestoreObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRestoreObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RestoreObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRestoreObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpSelectObjectContent struct { +} + +func (*validateOpSelectObjectContent) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpSelectObjectContent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*SelectObjectContentInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpSelectObjectContentInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUploadPartCopy struct { +} + +func (*validateOpUploadPartCopy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUploadPartCopy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UploadPartCopyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUploadPartCopyInput(input); err != nil { + return out, metadata, err + } + 
return next.HandleInitialize(ctx, in) +} + +type validateOpUploadPart struct { +} + +func (*validateOpUploadPart) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUploadPart) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UploadPartInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUploadPartInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpWriteGetObjectResponse struct { +} + +func (*validateOpWriteGetObjectResponse) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpWriteGetObjectResponse) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*WriteGetObjectResponseInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpWriteGetObjectResponseInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpAbortMultipartUploadValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAbortMultipartUpload{}, middleware.After) +} + +func addOpCompleteMultipartUploadValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCompleteMultipartUpload{}, middleware.After) +} + +func addOpCopyObjectValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCopyObject{}, middleware.After) +} + +func addOpCreateBucketValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateBucket{}, middleware.After) +} + +func addOpCreateMultipartUploadValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateMultipartUpload{}, middleware.After) +} + +func addOpCreateSessionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateSession{}, middleware.After) +} + +func addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketAnalyticsConfiguration{}, middleware.After) +} + +func addOpDeleteBucketCorsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketCors{}, middleware.After) +} + +func addOpDeleteBucketEncryptionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketEncryption{}, middleware.After) +} + +func addOpDeleteBucketValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucket{}, middleware.After) +} + +func addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) +} + +func addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketInventoryConfiguration{}, middleware.After) +} + +func addOpDeleteBucketLifecycleValidationMiddleware(stack 
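Every generated validateOp* middleware above follows the same pattern: assert the operation's concrete input type, run the matching validateOp*Input helper, and only then hand off to the next Initialize handler. The standalone sketch below shows how such a validator is registered on a smithy-go middleware stack, which is what the addOp*ValidationMiddleware helpers in the next hunk do; it is illustrative only (not part of this diff), and the no-op request function and in-line validator body are assumptions for the sake of a runnable example.

package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

func main() {
	// A bare stack; real SDK clients build one per operation call.
	stack := middleware.NewStack("example", func() interface{} { return nil })

	// Equivalent of one generated validator: runs in the Initialize phase
	// before the request is serialized, failing fast on bad input.
	validator := middleware.InitializeMiddlewareFunc("OperationInputValidation",
		func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
			middleware.InitializeOutput, middleware.Metadata, error,
		) {
			if in.Parameters == nil {
				return middleware.InitializeOutput{}, middleware.Metadata{}, fmt.Errorf("missing operation input")
			}
			return next.HandleInitialize(ctx, in)
		})

	// This is the one-liner each addOp*ValidationMiddleware helper wraps.
	if err := stack.Initialize.Add(validator, middleware.After); err != nil {
		panic(err)
	}
}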
+
+func addOpAbortMultipartUploadValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpAbortMultipartUpload{}, middleware.After)
+}
+
+func addOpCompleteMultipartUploadValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpCompleteMultipartUpload{}, middleware.After)
+}
+
+func addOpCopyObjectValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpCopyObject{}, middleware.After)
+}
+
+func addOpCreateBucketValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpCreateBucket{}, middleware.After)
+}
+
+func addOpCreateMultipartUploadValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpCreateMultipartUpload{}, middleware.After)
+}
+
+func addOpCreateSessionValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpCreateSession{}, middleware.After)
+}
+
+func addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteBucketAnalyticsConfiguration{}, middleware.After)
+}
+
+func addOpDeleteBucketCorsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteBucketCors{}, middleware.After)
+}
+
+func addOpDeleteBucketEncryptionValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteBucketEncryption{}, middleware.After)
+}
+
+func addOpDeleteBucketValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteBucket{}, middleware.After)
+}
+
+func addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After)
+}
+
+func addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteBucketInventoryConfiguration{}, middleware.After)
+}
+
+func addOpDeleteBucketLifecycleValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteBucketLifecycle{}, middleware.After)
+}
+
+func addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteBucketMetricsConfiguration{}, middleware.After)
+}
+
+func addOpDeleteBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteBucketOwnershipControls{}, middleware.After)
+}
+
+func addOpDeleteBucketPolicyValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteBucketPolicy{}, middleware.After)
+}
+
+func addOpDeleteBucketReplicationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteBucketReplication{}, middleware.After)
+}
+
+func addOpDeleteBucketTaggingValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteBucketTagging{}, middleware.After)
+}
+
+func addOpDeleteBucketWebsiteValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteBucketWebsite{}, middleware.After)
+}
+
+func addOpDeleteObjectValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteObject{}, middleware.After)
+}
+
+func addOpDeleteObjectsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteObjects{}, middleware.After)
+}
+
+func addOpDeleteObjectTaggingValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeleteObjectTagging{}, middleware.After)
+}
+
+func addOpDeletePublicAccessBlockValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpDeletePublicAccessBlock{}, middleware.After)
+}
+
+func addOpGetBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketAccelerateConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketAclValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketAcl{}, middleware.After)
+}
+
+func addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketAnalyticsConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketCorsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketCors{}, middleware.After)
+}
+
+func addOpGetBucketEncryptionValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketEncryption{}, middleware.After)
+}
+
+func addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketIntelligentTieringConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketInventoryConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketLifecycleConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketLocationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketLocation{}, middleware.After)
+}
+
+func addOpGetBucketLoggingValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketLogging{}, middleware.After)
+}
+
+func addOpGetBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketMetricsConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketNotificationConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketOwnershipControls{}, middleware.After)
+}
+
+func addOpGetBucketPolicyValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketPolicy{}, middleware.After)
+}
+
+func addOpGetBucketPolicyStatusValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketPolicyStatus{}, middleware.After)
+}
+
+func addOpGetBucketReplicationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketReplication{}, middleware.After)
+}
+
+func addOpGetBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketRequestPayment{}, middleware.After)
+}
+
+func addOpGetBucketTaggingValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketTagging{}, middleware.After)
+}
+
+func addOpGetBucketVersioningValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketVersioning{}, middleware.After)
+}
+
+func addOpGetBucketWebsiteValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetBucketWebsite{}, middleware.After)
+}
+
+func addOpGetObjectAclValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetObjectAcl{}, middleware.After)
+}
+
+func addOpGetObjectAttributesValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetObjectAttributes{}, middleware.After)
+}
+
+func addOpGetObjectValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetObject{}, middleware.After)
+}
+
+func addOpGetObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetObjectLegalHold{}, middleware.After)
+}
+
+func addOpGetObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetObjectLockConfiguration{}, middleware.After)
+}
+
+func addOpGetObjectRetentionValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetObjectRetention{}, middleware.After)
+}
+
+func addOpGetObjectTaggingValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetObjectTagging{}, middleware.After)
+}
+
+func addOpGetObjectTorrentValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetObjectTorrent{}, middleware.After)
+}
+
+func addOpGetPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpGetPublicAccessBlock{}, middleware.After)
+}
+
+func addOpHeadBucketValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpHeadBucket{}, middleware.After)
+}
+
+func addOpHeadObjectValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpHeadObject{}, middleware.After)
+}
+
+func addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpListBucketAnalyticsConfigurations{}, middleware.After)
+}
+
+func addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpListBucketIntelligentTieringConfigurations{}, middleware.After)
+}
+
+func addOpListBucketInventoryConfigurationsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpListBucketInventoryConfigurations{}, middleware.After)
+}
+
+func addOpListBucketMetricsConfigurationsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpListBucketMetricsConfigurations{}, middleware.After)
+}
+
+func addOpListMultipartUploadsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpListMultipartUploads{}, middleware.After)
+}
+
+func addOpListObjectsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpListObjects{}, middleware.After)
+}
+
+func addOpListObjectsV2ValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpListObjectsV2{}, middleware.After)
+}
+
+func addOpListObjectVersionsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpListObjectVersions{}, middleware.After)
+}
+
+func addOpListPartsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpListParts{}, middleware.After)
+}
+
+func addOpPutBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketAccelerateConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketAclValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketAcl{}, middleware.After)
+}
+
+func addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketAnalyticsConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketCorsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketCors{}, middleware.After)
+}
+
+func addOpPutBucketEncryptionValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketEncryption{}, middleware.After)
+}
+
+func addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketIntelligentTieringConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketInventoryConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketLifecycleConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketLoggingValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketLogging{}, middleware.After)
+}
+
+func addOpPutBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketMetricsConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketNotificationConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketOwnershipControls{}, middleware.After)
+}
+
+func addOpPutBucketPolicyValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketPolicy{}, middleware.After)
+}
+
+func addOpPutBucketReplicationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketReplication{}, middleware.After)
+}
+
+func addOpPutBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketRequestPayment{}, middleware.After)
+}
+
+func addOpPutBucketTaggingValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketTagging{}, middleware.After)
+}
+
+func addOpPutBucketVersioningValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketVersioning{}, middleware.After)
+}
+
+func addOpPutBucketWebsiteValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutBucketWebsite{}, middleware.After)
+}
+
+func addOpPutObjectAclValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutObjectAcl{}, middleware.After)
+}
+
+func addOpPutObjectValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutObject{}, middleware.After)
+}
+
+func addOpPutObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutObjectLegalHold{}, middleware.After)
+}
+
+func addOpPutObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutObjectLockConfiguration{}, middleware.After)
+}
+
+func addOpPutObjectRetentionValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutObjectRetention{}, middleware.After)
+}
+
+func addOpPutObjectTaggingValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutObjectTagging{}, middleware.After)
+}
+
+func addOpPutPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpPutPublicAccessBlock{}, middleware.After)
+}
+
+func addOpRestoreObjectValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpRestoreObject{}, middleware.After)
+}
+
+func addOpSelectObjectContentValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpSelectObjectContent{}, middleware.After)
+}
+
+func addOpUploadPartCopyValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpUploadPartCopy{}, middleware.After)
+}
+
+func addOpUploadPartValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpUploadPart{}, middleware.After)
+}
+
+func addOpWriteGetObjectResponseValidationMiddleware(stack *middleware.Stack) error {
+    return stack.Initialize.Add(&validateOpWriteGetObjectResponse{}, middleware.After)
+}
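The validate* shape helpers below share a second generated pattern: accumulate problems in a smithy.InvalidParamsError, nest child-shape errors with AddNested, and return nil only when nothing was recorded, so one call reports every missing field at once. A minimal standalone sketch of that pattern, assuming a hypothetical Widget type that is not part of the SDK:

package main

import (
	"fmt"

	smithy "github.com/aws/smithy-go"
)

// Widget is a made-up shape used only to illustrate the pattern.
type Widget struct {
	Name *string
	Tags []string
}

func validateWidget(v *Widget) error {
	if v == nil {
		return nil
	}
	invalidParams := smithy.InvalidParamsError{Context: "Widget"}
	if v.Name == nil {
		invalidParams.Add(smithy.NewErrParamRequired("Name"))
	}
	if len(v.Tags) == 0 {
		invalidParams.Add(smithy.NewErrParamRequired("Tags"))
	}
	if invalidParams.Len() > 0 {
		return invalidParams // reports both missing fields together
	}
	return nil
}

func main() {
	fmt.Println(validateWidget(&Widget{})) // prints an aggregated validation error
}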
+
+func validateAccessControlPolicy(v *types.AccessControlPolicy) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "AccessControlPolicy"}
+    if v.Grants != nil {
+        if err := validateGrants(v.Grants); err != nil {
+            invalidParams.AddNested("Grants", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateAccessControlTranslation(v *types.AccessControlTranslation) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "AccessControlTranslation"}
+    if len(v.Owner) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Owner"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateAnalyticsAndOperator(v *types.AnalyticsAndOperator) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "AnalyticsAndOperator"}
+    if v.Tags != nil {
+        if err := validateTagSet(v.Tags); err != nil {
+            invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateAnalyticsConfiguration(v *types.AnalyticsConfiguration) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "AnalyticsConfiguration"}
+    if v.Id == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Id"))
+    }
+    if v.Filter != nil {
+        if err := validateAnalyticsFilter(v.Filter); err != nil {
+            invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+        }
+    }
+    if v.StorageClassAnalysis == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("StorageClassAnalysis"))
+    } else if v.StorageClassAnalysis != nil {
+        if err := validateStorageClassAnalysis(v.StorageClassAnalysis); err != nil {
+            invalidParams.AddNested("StorageClassAnalysis", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateAnalyticsExportDestination(v *types.AnalyticsExportDestination) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "AnalyticsExportDestination"}
+    if v.S3BucketDestination == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination"))
+    } else if v.S3BucketDestination != nil {
+        if err := validateAnalyticsS3BucketDestination(v.S3BucketDestination); err != nil {
+            invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateAnalyticsFilter(v types.AnalyticsFilter) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "AnalyticsFilter"}
+    switch uv := v.(type) {
+    case *types.AnalyticsFilterMemberAnd:
+        if err := validateAnalyticsAndOperator(&uv.Value); err != nil {
+            invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError))
+        }
+
+    case *types.AnalyticsFilterMemberTag:
+        if err := validateTag(&uv.Value); err != nil {
+            invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError))
+        }
+
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "AnalyticsS3BucketDestination"}
+    if len(v.Format) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Format"))
+    }
+    if v.Bucket == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "BucketLifecycleConfiguration"}
+    if v.Rules == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Rules"))
+    } else if v.Rules != nil {
+        if err := validateLifecycleRules(v.Rules); err != nil {
+            invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateBucketLoggingStatus(v *types.BucketLoggingStatus) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "BucketLoggingStatus"}
+    if v.LoggingEnabled != nil {
+        if err := validateLoggingEnabled(v.LoggingEnabled); err != nil {
+            invalidParams.AddNested("LoggingEnabled", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateCORSConfiguration(v *types.CORSConfiguration) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "CORSConfiguration"}
+    if v.CORSRules == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("CORSRules"))
+    } else if v.CORSRules != nil {
+        if err := validateCORSRules(v.CORSRules); err != nil {
+            invalidParams.AddNested("CORSRules", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateCORSRule(v *types.CORSRule) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "CORSRule"}
+    if v.AllowedMethods == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("AllowedMethods"))
+    }
+    if v.AllowedOrigins == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("AllowedOrigins"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateCORSRules(v []types.CORSRule) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "CORSRules"}
+    for i := range v {
+        if err := validateCORSRule(&v[i]); err != nil {
+            invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateDelete(v *types.Delete) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "Delete"}
+    if v.Objects == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Objects"))
+    } else if v.Objects != nil {
+        if err := validateObjectIdentifierList(v.Objects); err != nil {
+            invalidParams.AddNested("Objects", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateDestination(v *types.Destination) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "Destination"}
+    if v.Bucket == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+    }
+    if v.AccessControlTranslation != nil {
+        if err := validateAccessControlTranslation(v.AccessControlTranslation); err != nil {
+            invalidParams.AddNested("AccessControlTranslation", err.(smithy.InvalidParamsError))
+        }
+    }
+    if v.ReplicationTime != nil {
+        if err := validateReplicationTime(v.ReplicationTime); err != nil {
+            invalidParams.AddNested("ReplicationTime", err.(smithy.InvalidParamsError))
+        }
+    }
+    if v.Metrics != nil {
+        if err := validateMetrics(v.Metrics); err != nil {
+            invalidParams.AddNested("Metrics", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateEncryption(v *types.Encryption) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "Encryption"}
+    if len(v.EncryptionType) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("EncryptionType"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateErrorDocument(v *types.ErrorDocument) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "ErrorDocument"}
+    if v.Key == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Key"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateExistingObjectReplication(v *types.ExistingObjectReplication) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "ExistingObjectReplication"}
+    if len(v.Status) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Status"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateGlacierJobParameters(v *types.GlacierJobParameters) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "GlacierJobParameters"}
+    if len(v.Tier) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Tier"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateGrant(v *types.Grant) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "Grant"}
+    if v.Grantee != nil {
+        if err := validateGrantee(v.Grantee); err != nil {
+            invalidParams.AddNested("Grantee", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateGrantee(v *types.Grantee) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "Grantee"}
+    if len(v.Type) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Type"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateGrants(v []types.Grant) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "Grants"}
+    for i := range v {
+        if err := validateGrant(&v[i]); err != nil {
+            invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateIndexDocument(v *types.IndexDocument) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "IndexDocument"}
+    if v.Suffix == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Suffix"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateIntelligentTieringAndOperator(v *types.IntelligentTieringAndOperator) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringAndOperator"}
+    if v.Tags != nil {
+        if err := validateTagSet(v.Tags); err != nil {
+            invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateIntelligentTieringConfiguration(v *types.IntelligentTieringConfiguration) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringConfiguration"}
+    if v.Id == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Id"))
+    }
+    if v.Filter != nil {
+        if err := validateIntelligentTieringFilter(v.Filter); err != nil {
+            invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+        }
+    }
+    if len(v.Status) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Status"))
+    }
+    if v.Tierings == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Tierings"))
+    } else if v.Tierings != nil {
+        if err := validateTieringList(v.Tierings); err != nil {
+            invalidParams.AddNested("Tierings", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateIntelligentTieringFilter(v *types.IntelligentTieringFilter) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringFilter"}
+    if v.Tag != nil {
+        if err := validateTag(v.Tag); err != nil {
+            invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError))
+        }
+    }
+    if v.And != nil {
+        if err := validateIntelligentTieringAndOperator(v.And); err != nil {
+            invalidParams.AddNested("And", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateInventoryConfiguration(v *types.InventoryConfiguration) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "InventoryConfiguration"}
+    if v.Destination == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Destination"))
+    } else if v.Destination != nil {
+        if err := validateInventoryDestination(v.Destination); err != nil {
+            invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError))
+        }
+    }
+    if v.IsEnabled == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("IsEnabled"))
+    }
+    if v.Filter != nil {
+        if err := validateInventoryFilter(v.Filter); err != nil {
+            invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+        }
+    }
+    if v.Id == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Id"))
+    }
+    if len(v.IncludedObjectVersions) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("IncludedObjectVersions"))
+    }
+    if v.Schedule == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Schedule"))
+    } else if v.Schedule != nil {
+        if err := validateInventorySchedule(v.Schedule); err != nil {
+            invalidParams.AddNested("Schedule", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateInventoryDestination(v *types.InventoryDestination) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "InventoryDestination"}
+    if v.S3BucketDestination == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination"))
+    } else if v.S3BucketDestination != nil {
+        if err := validateInventoryS3BucketDestination(v.S3BucketDestination); err != nil {
+            invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateInventoryEncryption(v *types.InventoryEncryption) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "InventoryEncryption"}
+    if v.SSEKMS != nil {
+        if err := validateSSEKMS(v.SSEKMS); err != nil {
+            invalidParams.AddNested("SSEKMS", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateInventoryFilter(v *types.InventoryFilter) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "InventoryFilter"}
+    if v.Prefix == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Prefix"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateInventoryS3BucketDestination(v *types.InventoryS3BucketDestination) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "InventoryS3BucketDestination"}
+    if v.Bucket == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+    }
+    if len(v.Format) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Format"))
+    }
+    if v.Encryption != nil {
+        if err := validateInventoryEncryption(v.Encryption); err != nil {
+            invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateInventorySchedule(v *types.InventorySchedule) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "InventorySchedule"}
+    if len(v.Frequency) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Frequency"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateLambdaFunctionConfiguration(v *types.LambdaFunctionConfiguration) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfiguration"}
+    if v.LambdaFunctionArn == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("LambdaFunctionArn"))
+    }
+    if v.Events == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Events"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateLambdaFunctionConfigurationList(v []types.LambdaFunctionConfiguration) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfigurationList"}
+    for i := range v {
+        if err := validateLambdaFunctionConfiguration(&v[i]); err != nil {
+            invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateLifecycleRule(v *types.LifecycleRule) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "LifecycleRule"}
+    if v.Filter != nil {
+        if err := validateLifecycleRuleFilter(v.Filter); err != nil {
+            invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+        }
+    }
+    if len(v.Status) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Status"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleAndOperator"}
+    if v.Tags != nil {
+        if err := validateTagSet(v.Tags); err != nil {
+            invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateLifecycleRuleFilter(v types.LifecycleRuleFilter) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleFilter"}
+    switch uv := v.(type) {
+    case *types.LifecycleRuleFilterMemberAnd:
+        if err := validateLifecycleRuleAndOperator(&uv.Value); err != nil {
+            invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError))
+        }
+
+    case *types.LifecycleRuleFilterMemberTag:
+        if err := validateTag(&uv.Value); err != nil {
+            invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError))
+        }
+
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateLifecycleRules(v []types.LifecycleRule) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "LifecycleRules"}
+    for i := range v {
+        if err := validateLifecycleRule(&v[i]); err != nil {
+            invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateLoggingEnabled(v *types.LoggingEnabled) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "LoggingEnabled"}
+    if v.TargetBucket == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("TargetBucket"))
+    }
+    if v.TargetGrants != nil {
+        if err := validateTargetGrants(v.TargetGrants); err != nil {
+            invalidParams.AddNested("TargetGrants", err.(smithy.InvalidParamsError))
+        }
+    }
+    if v.TargetPrefix == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("TargetPrefix"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateMetrics(v *types.Metrics) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "Metrics"}
+    if len(v.Status) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Status"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateMetricsAndOperator(v *types.MetricsAndOperator) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "MetricsAndOperator"}
+    if v.Tags != nil {
+        if err := validateTagSet(v.Tags); err != nil {
+            invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateMetricsConfiguration(v *types.MetricsConfiguration) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "MetricsConfiguration"}
+    if v.Id == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Id"))
+    }
+    if v.Filter != nil {
+        if err := validateMetricsFilter(v.Filter); err != nil {
+            invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateMetricsFilter(v types.MetricsFilter) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "MetricsFilter"}
+    switch uv := v.(type) {
+    case *types.MetricsFilterMemberAnd:
+        if err := validateMetricsAndOperator(&uv.Value); err != nil {
+            invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError))
+        }
+
+    case *types.MetricsFilterMemberTag:
+        if err := validateTag(&uv.Value); err != nil {
+            invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError))
+        }
+
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateNotificationConfiguration(v *types.NotificationConfiguration) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "NotificationConfiguration"}
+    if v.TopicConfigurations != nil {
+        if err := validateTopicConfigurationList(v.TopicConfigurations); err != nil {
+            invalidParams.AddNested("TopicConfigurations", err.(smithy.InvalidParamsError))
+        }
+    }
+    if v.QueueConfigurations != nil {
+        if err := validateQueueConfigurationList(v.QueueConfigurations); err != nil {
+            invalidParams.AddNested("QueueConfigurations", err.(smithy.InvalidParamsError))
+        }
+    }
+    if v.LambdaFunctionConfigurations != nil {
+        if err := validateLambdaFunctionConfigurationList(v.LambdaFunctionConfigurations); err != nil {
+            invalidParams.AddNested("LambdaFunctionConfigurations", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateObjectIdentifier(v *types.ObjectIdentifier) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifier"}
+    if v.Key == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Key"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateObjectIdentifierList(v []types.ObjectIdentifier) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifierList"}
+    for i := range v {
+        if err := validateObjectIdentifier(&v[i]); err != nil {
+            invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateOutputLocation(v *types.OutputLocation) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "OutputLocation"}
+    if v.S3 != nil {
+        if err := validateS3Location(v.S3); err != nil {
+            invalidParams.AddNested("S3", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateOwnershipControls(v *types.OwnershipControls) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "OwnershipControls"}
+    if v.Rules == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Rules"))
+    } else if v.Rules != nil {
+        if err := validateOwnershipControlsRules(v.Rules); err != nil {
+            invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateOwnershipControlsRule(v *types.OwnershipControlsRule) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRule"}
+    if len(v.ObjectOwnership) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("ObjectOwnership"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateOwnershipControlsRules(v []types.OwnershipControlsRule) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRules"}
+    for i := range v {
+        if err := validateOwnershipControlsRule(&v[i]); err != nil {
+            invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateQueueConfiguration(v *types.QueueConfiguration) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "QueueConfiguration"}
+    if v.QueueArn == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("QueueArn"))
+    }
+    if v.Events == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Events"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateQueueConfigurationList(v []types.QueueConfiguration) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "QueueConfigurationList"}
+    for i := range v {
+        if err := validateQueueConfiguration(&v[i]); err != nil {
+            invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateRedirectAllRequestsTo(v *types.RedirectAllRequestsTo) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "RedirectAllRequestsTo"}
+    if v.HostName == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("HostName"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateReplicaModifications(v *types.ReplicaModifications) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "ReplicaModifications"}
+    if len(v.Status) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Status"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateReplicationConfiguration(v *types.ReplicationConfiguration) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "ReplicationConfiguration"}
+    if v.Role == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Role"))
+    }
+    if v.Rules == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Rules"))
+    } else if v.Rules != nil {
+        if err := validateReplicationRules(v.Rules); err != nil {
+            invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateReplicationRule(v *types.ReplicationRule) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "ReplicationRule"}
+    if v.Filter != nil {
+        if err := validateReplicationRuleFilter(v.Filter); err != nil {
+            invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+        }
+    }
+    if len(v.Status) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Status"))
+    }
+    if v.SourceSelectionCriteria != nil {
+        if err := validateSourceSelectionCriteria(v.SourceSelectionCriteria); err != nil {
+            invalidParams.AddNested("SourceSelectionCriteria", err.(smithy.InvalidParamsError))
+        }
+    }
+    if v.ExistingObjectReplication != nil {
+        if err := validateExistingObjectReplication(v.ExistingObjectReplication); err != nil {
+            invalidParams.AddNested("ExistingObjectReplication", err.(smithy.InvalidParamsError))
+        }
+    }
+    if v.Destination == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Destination"))
+    } else if v.Destination != nil {
+        if err := validateDestination(v.Destination); err != nil {
+            invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleAndOperator"}
+    if v.Tags != nil {
+        if err := validateTagSet(v.Tags); err != nil {
+            invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateReplicationRuleFilter(v types.ReplicationRuleFilter) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleFilter"}
+    switch uv := v.(type) {
+    case *types.ReplicationRuleFilterMemberAnd:
+        if err := validateReplicationRuleAndOperator(&uv.Value); err != nil {
+            invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError))
+        }
+
+    case *types.ReplicationRuleFilterMemberTag:
+        if err := validateTag(&uv.Value); err != nil {
+            invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError))
+        }
+
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateReplicationRules(v []types.ReplicationRule) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "ReplicationRules"}
+    for i := range v {
+        if err := validateReplicationRule(&v[i]); err != nil {
+            invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateReplicationTime(v *types.ReplicationTime) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "ReplicationTime"}
+    if len(v.Status) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Status"))
+    }
+    if v.Time == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Time"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateRequestPaymentConfiguration(v *types.RequestPaymentConfiguration) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "RequestPaymentConfiguration"}
+    if len(v.Payer) == 0 {
+        invalidParams.Add(smithy.NewErrParamRequired("Payer"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateRestoreRequest(v *types.RestoreRequest) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "RestoreRequest"}
+    if v.GlacierJobParameters != nil {
+        if err := validateGlacierJobParameters(v.GlacierJobParameters); err != nil {
+            invalidParams.AddNested("GlacierJobParameters", err.(smithy.InvalidParamsError))
+        }
+    }
+    if v.SelectParameters != nil {
+        if err := validateSelectParameters(v.SelectParameters); err != nil {
+            invalidParams.AddNested("SelectParameters", err.(smithy.InvalidParamsError))
+        }
+    }
+    if v.OutputLocation != nil {
+        if err := validateOutputLocation(v.OutputLocation); err != nil {
+            invalidParams.AddNested("OutputLocation", err.(smithy.InvalidParamsError))
+        }
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateRoutingRule(v *types.RoutingRule) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "RoutingRule"}
+    if v.Redirect == nil {
+        invalidParams.Add(smithy.NewErrParamRequired("Redirect"))
+    }
+    if invalidParams.Len() > 0 {
+        return invalidParams
+    } else {
+        return nil
+    }
+}
+
+func validateRoutingRules(v []types.RoutingRule) error {
+    if v == nil {
+        return nil
+    }
+    invalidParams := smithy.InvalidParamsError{Context: "RoutingRules"}
+    for i := range v {
+        if
err := validateRoutingRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateS3Location(v *types.S3Location) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "S3Location"} + if v.BucketName == nil { + invalidParams.Add(smithy.NewErrParamRequired("BucketName")) + } + if v.Prefix == nil { + invalidParams.Add(smithy.NewErrParamRequired("Prefix")) + } + if v.Encryption != nil { + if err := validateEncryption(v.Encryption); err != nil { + invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError)) + } + } + if v.AccessControlList != nil { + if err := validateGrants(v.AccessControlList); err != nil { + invalidParams.AddNested("AccessControlList", err.(smithy.InvalidParamsError)) + } + } + if v.Tagging != nil { + if err := validateTagging(v.Tagging); err != nil { + invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSelectParameters(v *types.SelectParameters) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SelectParameters"} + if v.InputSerialization == nil { + invalidParams.Add(smithy.NewErrParamRequired("InputSerialization")) + } + if len(v.ExpressionType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ExpressionType")) + } + if v.Expression == nil { + invalidParams.Add(smithy.NewErrParamRequired("Expression")) + } + if v.OutputSerialization == nil { + invalidParams.Add(smithy.NewErrParamRequired("OutputSerialization")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionByDefault"} + if len(v.SSEAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SSEAlgorithm")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServerSideEncryptionConfiguration(v *types.ServerSideEncryptionConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionConfiguration"} + if v.Rules == nil { + invalidParams.Add(smithy.NewErrParamRequired("Rules")) + } else if v.Rules != nil { + if err := validateServerSideEncryptionRules(v.Rules); err != nil { + invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServerSideEncryptionRule(v *types.ServerSideEncryptionRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRule"} + if v.ApplyServerSideEncryptionByDefault != nil { + if err := validateServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServerSideEncryptionRules(v []types.ServerSideEncryptionRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRules"} + for i := range v { 
+ if err := validateServerSideEncryptionRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSourceSelectionCriteria(v *types.SourceSelectionCriteria) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SourceSelectionCriteria"} + if v.SseKmsEncryptedObjects != nil { + if err := validateSseKmsEncryptedObjects(v.SseKmsEncryptedObjects); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(smithy.InvalidParamsError)) + } + } + if v.ReplicaModifications != nil { + if err := validateReplicaModifications(v.ReplicaModifications); err != nil { + invalidParams.AddNested("ReplicaModifications", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSSEKMS(v *types.SSEKMS) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SSEKMS"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SseKmsEncryptedObjects"} + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateStorageClassAnalysis(v *types.StorageClassAnalysis) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StorageClassAnalysis"} + if v.DataExport != nil { + if err := validateStorageClassAnalysisDataExport(v.DataExport); err != nil { + invalidParams.AddNested("DataExport", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StorageClassAnalysisDataExport"} + if len(v.OutputSchemaVersion) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("OutputSchemaVersion")) + } + if v.Destination == nil { + invalidParams.Add(smithy.NewErrParamRequired("Destination")) + } else if v.Destination != nil { + if err := validateAnalyticsExportDestination(v.Destination); err != nil { + invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagging(v *types.Tagging) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tagging"} + if v.TagSet == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagSet")) + } else if v.TagSet != nil { + if err := validateTagSet(v.TagSet); err != nil { + invalidParams.AddNested("TagSet", err.(smithy.InvalidParamsError)) + } + } + 
if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagSet(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagSet"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTargetGrant(v *types.TargetGrant) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TargetGrant"} + if v.Grantee != nil { + if err := validateGrantee(v.Grantee); err != nil { + invalidParams.AddNested("Grantee", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTargetGrants(v []types.TargetGrant) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TargetGrants"} + for i := range v { + if err := validateTargetGrant(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTiering(v *types.Tiering) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tiering"} + if v.Days == nil { + invalidParams.Add(smithy.NewErrParamRequired("Days")) + } + if len(v.AccessTier) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("AccessTier")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTieringList(v []types.Tiering) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TieringList"} + for i := range v { + if err := validateTiering(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTopicConfiguration(v *types.TopicConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TopicConfiguration"} + if v.TopicArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("TopicArn")) + } + if v.Events == nil { + invalidParams.Add(smithy.NewErrParamRequired("Events")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTopicConfigurationList(v []types.TopicConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TopicConfigurationList"} + for i := range v { + if err := validateTopicConfiguration(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateWebsiteConfiguration(v *types.WebsiteConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "WebsiteConfiguration"} + if v.ErrorDocument != nil { + if err := validateErrorDocument(v.ErrorDocument); err != nil { + invalidParams.AddNested("ErrorDocument", err.(smithy.InvalidParamsError)) + } + } + if v.IndexDocument != nil { + if err := validateIndexDocument(v.IndexDocument); err != nil { + invalidParams.AddNested("IndexDocument", err.(smithy.InvalidParamsError)) + } + } + if v.RedirectAllRequestsTo 
!= nil { + if err := validateRedirectAllRequestsTo(v.RedirectAllRequestsTo); err != nil { + invalidParams.AddNested("RedirectAllRequestsTo", err.(smithy.InvalidParamsError)) + } + } + if v.RoutingRules != nil { + if err := validateRoutingRules(v.RoutingRules); err != nil { + invalidParams.AddNested("RoutingRules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAbortMultipartUploadInput(v *AbortMultipartUploadInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AbortMultipartUploadInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCompleteMultipartUploadInput(v *CompleteMultipartUploadInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CompleteMultipartUploadInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCopyObjectInput(v *CopyObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CopyObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.CopySource == nil { + invalidParams.Add(smithy.NewErrParamRequired("CopySource")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateBucketInput(v *CreateBucketInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateBucketInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateMultipartUploadInput(v *CreateMultipartUploadInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateMultipartUploadInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateSessionInput(v *CreateSessionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateSessionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketAnalyticsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketCorsInput(v *DeleteBucketCorsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketCorsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketEncryptionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketInput(v *DeleteBucketInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketIntelligentTieringConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInventoryConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketLifecycleInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetricsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketOwnershipControlsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketPolicyInput(v *DeleteBucketPolicyInput) error { + if v == nil 
{ + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketPolicyInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketReplicationInput(v *DeleteBucketReplicationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketReplicationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketTaggingInput(v *DeleteBucketTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketWebsiteInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteObjectInput(v *DeleteObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteObjectsInput(v *DeleteObjectsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Delete == nil { + invalidParams.Add(smithy.NewErrParamRequired("Delete")) + } else if v.Delete != nil { + if err := validateDelete(v.Delete); err != nil { + invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteObjectTaggingInput(v *DeleteObjectTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeletePublicAccessBlockInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketAccelerateConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 
{ + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketAclInput(v *GetBucketAclInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketAclInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketAnalyticsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketCorsInput(v *GetBucketCorsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketCorsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketEncryptionInput(v *GetBucketEncryptionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketEncryptionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketIntelligentTieringConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketInventoryConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketLifecycleConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketLocationInput(v *GetBucketLocationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketLocationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketLoggingInput(v *GetBucketLoggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketLoggingInput"} + if v.Bucket == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetricsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketNotificationConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketOwnershipControlsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketPolicyInput(v *GetBucketPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyStatusInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketReplicationInput(v *GetBucketReplicationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketReplicationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketRequestPaymentInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketTaggingInput(v *GetBucketTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketVersioningInput(v *GetBucketVersioningInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketVersioningInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + 
return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketWebsiteInput(v *GetBucketWebsiteInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketWebsiteInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectAclInput(v *GetObjectAclInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectAclInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectAttributesInput(v *GetObjectAttributesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectAttributesInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.ObjectAttributes == nil { + invalidParams.Add(smithy.NewErrParamRequired("ObjectAttributes")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectInput(v *GetObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectLegalHoldInput(v *GetObjectLegalHoldInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectLegalHoldInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectLockConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectRetentionInput(v *GetObjectRetentionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectRetentionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectTaggingInput(v *GetObjectTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectTorrentInput(v 
*GetObjectTorrentInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectTorrentInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetPublicAccessBlockInput(v *GetPublicAccessBlockInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetPublicAccessBlockInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpHeadBucketInput(v *HeadBucketInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HeadBucketInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpHeadObjectInput(v *HeadObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HeadObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListBucketAnalyticsConfigurationsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListBucketIntelligentTieringConfigurationsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListBucketInventoryConfigurationsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListBucketMetricsConfigurationsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListMultipartUploadsInput(v *ListMultipartUploadsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListMultipartUploadsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func 
validateOpListObjectsInput(v *ListObjectsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListObjectsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListObjectsV2Input(v *ListObjectsV2Input) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListObjectsV2Input"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListObjectVersionsInput(v *ListObjectVersionsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListObjectVersionsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListPartsInput(v *ListPartsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListPartsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketAccelerateConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.AccelerateConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccelerateConfiguration")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketAclInput(v *PutBucketAclInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketAclInput"} + if v.AccessControlPolicy != nil { + if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError)) + } + } + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketAnalyticsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.AnalyticsConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("AnalyticsConfiguration")) + } else if v.AnalyticsConfiguration != nil { + if err := validateAnalyticsConfiguration(v.AnalyticsConfiguration); err != nil { + invalidParams.AddNested("AnalyticsConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketCorsInput(v *PutBucketCorsInput) error { + if v == nil { + return nil + } + invalidParams := 
smithy.InvalidParamsError{Context: "PutBucketCorsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.CORSConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("CORSConfiguration")) + } else if v.CORSConfiguration != nil { + if err := validateCORSConfiguration(v.CORSConfiguration); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketEncryptionInput(v *PutBucketEncryptionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketEncryptionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("ServerSideEncryptionConfiguration")) + } else if v.ServerSideEncryptionConfiguration != nil { + if err := validateServerSideEncryptionConfiguration(v.ServerSideEncryptionConfiguration); err != nil { + invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketIntelligentTieringConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.IntelligentTieringConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("IntelligentTieringConfiguration")) + } else if v.IntelligentTieringConfiguration != nil { + if err := validateIntelligentTieringConfiguration(v.IntelligentTieringConfiguration); err != nil { + invalidParams.AddNested("IntelligentTieringConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketInventoryConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.InventoryConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("InventoryConfiguration")) + } else if v.InventoryConfiguration != nil { + if err := validateInventoryConfiguration(v.InventoryConfiguration); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketLifecycleConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.LifecycleConfiguration != nil { + if err := validateBucketLifecycleConfiguration(v.LifecycleConfiguration); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(smithy.InvalidParamsError)) + } + } + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketLoggingInput(v *PutBucketLoggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketLoggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.BucketLoggingStatus == nil { + invalidParams.Add(smithy.NewErrParamRequired("BucketLoggingStatus")) + } else if v.BucketLoggingStatus != nil { + if err := validateBucketLoggingStatus(v.BucketLoggingStatus); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketMetricsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.MetricsConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("MetricsConfiguration")) + } else if v.MetricsConfiguration != nil { + if err := validateMetricsConfiguration(v.MetricsConfiguration); err != nil { + invalidParams.AddNested("MetricsConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketNotificationConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.NotificationConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("NotificationConfiguration")) + } else if v.NotificationConfiguration != nil { + if err := validateNotificationConfiguration(v.NotificationConfiguration); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketOwnershipControlsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.OwnershipControls == nil { + invalidParams.Add(smithy.NewErrParamRequired("OwnershipControls")) + } else if v.OwnershipControls != nil { + if err := validateOwnershipControls(v.OwnershipControls); err != nil { + invalidParams.AddNested("OwnershipControls", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketPolicyInput(v *PutBucketPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketPolicyInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Policy == nil { + invalidParams.Add(smithy.NewErrParamRequired("Policy")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketReplicationInput(v *PutBucketReplicationInput) error { + if v == nil { + return nil + } 
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketReplicationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.ReplicationConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("ReplicationConfiguration")) + } else if v.ReplicationConfiguration != nil { + if err := validateReplicationConfiguration(v.ReplicationConfiguration); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketRequestPaymentInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.RequestPaymentConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("RequestPaymentConfiguration")) + } else if v.RequestPaymentConfiguration != nil { + if err := validateRequestPaymentConfiguration(v.RequestPaymentConfiguration); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketTaggingInput(v *PutBucketTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Tagging == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tagging")) + } else if v.Tagging != nil { + if err := validateTagging(v.Tagging); err != nil { + invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketVersioningInput(v *PutBucketVersioningInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketVersioningInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.VersioningConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("VersioningConfiguration")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketWebsiteInput(v *PutBucketWebsiteInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketWebsiteInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.WebsiteConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("WebsiteConfiguration")) + } else if v.WebsiteConfiguration != nil { + if err := validateWebsiteConfiguration(v.WebsiteConfiguration); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectAclInput(v *PutObjectAclInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectAclInput"} + if v.AccessControlPolicy != nil { + if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError)) + } + } + if v.Bucket == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectInput(v *PutObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectLegalHoldInput(v *PutObjectLegalHoldInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectLegalHoldInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectLockConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectRetentionInput(v *PutObjectRetentionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectRetentionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectTaggingInput(v *PutObjectTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Tagging == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tagging")) + } else if v.Tagging != nil { + if err := validateTagging(v.Tagging); err != nil { + invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutPublicAccessBlockInput(v *PutPublicAccessBlockInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutPublicAccessBlockInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.PublicAccessBlockConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("PublicAccessBlockConfiguration")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRestoreObjectInput(v *RestoreObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RestoreObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.RestoreRequest != nil { + if err := validateRestoreRequest(v.RestoreRequest); err != nil { + 
invalidParams.AddNested("RestoreRequest", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpSelectObjectContentInput(v *SelectObjectContentInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SelectObjectContentInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Expression == nil { + invalidParams.Add(smithy.NewErrParamRequired("Expression")) + } + if len(v.ExpressionType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ExpressionType")) + } + if v.InputSerialization == nil { + invalidParams.Add(smithy.NewErrParamRequired("InputSerialization")) + } + if v.OutputSerialization == nil { + invalidParams.Add(smithy.NewErrParamRequired("OutputSerialization")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUploadPartCopyInput(v *UploadPartCopyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UploadPartCopyInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.CopySource == nil { + invalidParams.Add(smithy.NewErrParamRequired("CopySource")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.PartNumber == nil { + invalidParams.Add(smithy.NewErrParamRequired("PartNumber")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUploadPartInput(v *UploadPartInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UploadPartInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.PartNumber == nil { + invalidParams.Add(smithy.NewErrParamRequired("PartNumber")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpWriteGetObjectResponseInput(v *WriteGetObjectResponseInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "WriteGetObjectResponseInput"} + if v.RequestRoute == nil { + invalidParams.Add(smithy.NewErrParamRequired("RequestRoute")) + } + if v.RequestToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("RequestToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/smithy-go/container/private/cache/cache.go b/vendor/github.com/aws/smithy-go/container/private/cache/cache.go new file mode 100644 index 000000000..69af87751 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/container/private/cache/cache.go @@ -0,0 +1,19 @@ +// Package cache defines the interface for a key-based data store. +// +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. +package cache + +// Cache defines the interface for an opaquely-typed, key-based data store. 
+//
+// The thread-safety of this interface is undefined and is dictated by
+// implementations.
+type Cache interface {
+	// Retrieve the value associated with the given key. The returned boolean
+	// indicates whether the cache held a value for the given key.
+	Get(k interface{}) (interface{}, bool)
+
+	// Store a value under the given key.
+	Put(k interface{}, v interface{})
+}
diff --git a/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go b/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go
new file mode 100644
index 000000000..02ecb0a32
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go
@@ -0,0 +1,63 @@
+// Package lru implements [cache.Cache] with an LRU eviction policy.
+//
+// This implementation is NOT thread-safe.
+//
+// This package is designated as private and is intended for use only by the
+// smithy client runtime. The exported API therein is not considered stable and
+// is subject to breaking changes without notice.
+package lru
+
+import (
+	"container/list"
+
+	"github.com/aws/smithy-go/container/private/cache"
+)
+
+// New creates a new LRU cache with the given capacity.
+func New(cap int) cache.Cache {
+	return &lru{
+		entries: make(map[interface{}]*list.Element, cap),
+		cap:     cap,
+		mru:     list.New(),
+	}
+}
+
+type lru struct {
+	entries map[interface{}]*list.Element
+	cap     int
+
+	mru *list.List // least-recently used is at the back
+}
+
+type element struct {
+	key   interface{}
+	value interface{}
+}
+
+func (l *lru) Get(k interface{}) (interface{}, bool) {
+	e, ok := l.entries[k]
+	if !ok {
+		return nil, false
+	}
+
+	l.mru.MoveToFront(e)
+	return e.Value.(*element).value, true
+}
+
+func (l *lru) Put(k interface{}, v interface{}) {
+	if len(l.entries) == l.cap {
+		l.evict()
+	}
+
+	ev := &element{
+		key:   k,
+		value: v,
+	}
+	e := l.mru.PushFront(ev)
+	l.entries[k] = e
+}
+
+func (l *lru) evict() {
+	e := l.mru.Remove(l.mru.Back())
+	delete(l.entries, e.(*element).key)
+}
diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go
new file mode 100644
index 000000000..e24e190dc
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go
@@ -0,0 +1,4 @@
+// Package rulesfn provides endpoint rule functions for evaluating endpoint
+// resolution rules.
+
+package rulesfn
diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go
new file mode 100644
index 000000000..5cf4a7b02
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go
@@ -0,0 +1,25 @@
+package rulesfn
+
+// SubString returns the substring of the input provided. If the start or stop
+// indexes are not valid for the input, or the input contains non-ASCII
+// characters, nil will be returned.
+func SubString(input string, start, stop int, reverse bool) *string {
+	if start < 0 || stop < 1 || start >= stop || len(input) < stop {
+		return nil
+	}
+
+	for _, r := range input {
+		if r > 127 {
+			return nil
+		}
+	}
+
+	if !reverse {
+		v := input[start:stop]
+		return &v
+	}
+
+	rStart := len(input) - stop
+	rStop := len(input) - start
+	return SubString(input, rStart, rStop, false)
+}
diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go
new file mode 100644
index 000000000..0c1154127
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go
@@ -0,0 +1,130 @@
+package rulesfn
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+	"strings"
+
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// IsValidHostLabel reports whether the input is a single valid [RFC 1123]
+// host label. If allowSubDomains is true, the input may instead contain
+// multiple dot-separated host labels, each of which must itself be a valid
+// label for the input to be considered valid.
+//
+// [RFC 1123]: https://www.ietf.org/rfc/rfc1123.txt
+func IsValidHostLabel(input string, allowSubDomains bool) bool {
+	var labels []string
+	if allowSubDomains {
+		labels = strings.Split(input, ".")
+	} else {
+		labels = []string{input}
+	}
+
+	for _, label := range labels {
+		if !smithyhttp.ValidHostLabel(label) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// ParseURL returns a [URL] if the provided string could be parsed, or nil if
+// it could not (including inputs with a query string or a scheme other than
+// http/https, which this parser rejects).
+//
+// If the input URL string contains an IPv6 address with a zone index, the
+// returned [URL.Authority] value will contain the percent-escaped (%) zone
+// index separator.
+func ParseURL(input string) *URL {
+	u, err := url.Parse(input)
+	if err != nil {
+		return nil
+	}
+
+	if u.RawQuery != "" {
+		return nil
+	}
+
+	if u.Scheme != "http" && u.Scheme != "https" {
+		return nil
+	}
+
+	normalizedPath := u.Path
+	if !strings.HasPrefix(normalizedPath, "/") {
+		normalizedPath = "/" + normalizedPath
+	}
+	if !strings.HasSuffix(normalizedPath, "/") {
+		normalizedPath = normalizedPath + "/"
+	}
+
+	// IP6 hosts may have zone indexes that need to be escaped to be valid in a
+	// URI. The Go URL parser will unescape the `%25` into `%`. This needs to
+	// be reverted since the returned URL will be used in string builders.
+	authority := strings.ReplaceAll(u.Host, "%", "%25")
+
+	return &URL{
+		Scheme:         u.Scheme,
+		Authority:      authority,
+		Path:           u.Path,
+		NormalizedPath: normalizedPath,
+		IsIp:           net.ParseIP(hostnameWithoutZone(u)) != nil,
+	}
+}
+
+// URL provides the structure describing the parts of a parsed URL returned by
+// [ParseURL].
+type URL struct {
+	Scheme         string // https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+	Authority      string // https://www.rfc-editor.org/rfc/rfc3986#section-3.2
+	Path           string // https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+	NormalizedPath string // https://www.rfc-editor.org/rfc/rfc3986#section-6.2.3
+	IsIp           bool
+}
+
+// URIEncode returns a percent-encoded [RFC3986 section 2.1] version of the
+// input string.
+//
+// [RFC3986 section 2.1]: https://www.rfc-editor.org/rfc/rfc3986#section-2.1
+func URIEncode(input string) string {
+	var output strings.Builder
+	for _, c := range []byte(input) {
+		if validPercentEncodedChar(c) {
+			output.WriteByte(c)
+			continue
+		}
+
+		fmt.Fprintf(&output, "%%%X", c)
+	}
+
+	return output.String()
+}
+
+func validPercentEncodedChar(c byte) bool {
+	return (c >= 'a' && c <= 'z') ||
+		(c >= 'A' && c <= 'Z') ||
+		(c >= '0' && c <= '9') ||
+		c == '-' || c == '_' || c == '.' || c == '~'
+}
+
+// hostnameWithoutZone implements u.Hostname() but strips the IPv6 zone ID (if
+// present) so that net.ParseIP can still recognize IPv6 addresses with zone IDs.
+//
+// FUTURE(10/2023): netip.ParseAddr handles this natively but we can't take
+// that package as a dependency yet due to our min go version (1.15, netip
+// starts in 1.18). When we align with go runtime deprecation policy in
+// 10/2023, we can remove this.
+func hostnameWithoutZone(u *url.URL) string {
+	full := u.Hostname()
+
+	// this more or less mimics the internals of net/ (see unexported
+	// splitHostZone in that source) but throws the zone away because we don't
+	// need it
+	if i := strings.LastIndex(full, "%"); i > -1 {
+		return full[:i]
+	}
+	return full
+}
diff --git a/vendor/github.com/aws/smithy-go/sync/error.go b/vendor/github.com/aws/smithy-go/sync/error.go
new file mode 100644
index 000000000..629207672
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/sync/error.go
@@ -0,0 +1,53 @@
+package sync
+
+import "sync"
+
+// OnceErr wraps the behavior of recording an error
+// once and signaling on a channel when this has occurred.
+// Signaling is done by closing the channel.
+//
+// Type is safe for concurrent usage.
+type OnceErr struct {
+	mu  sync.RWMutex
+	err error
+	ch  chan struct{}
+}
+
+// NewOnceErr returns a new OnceErr
+func NewOnceErr() *OnceErr {
+	return &OnceErr{
+		ch: make(chan struct{}, 1),
+	}
+}
+
+// Err acquires a read-lock and returns an
+// error if one has been set.
+func (e *OnceErr) Err() error {
+	e.mu.RLock()
+	err := e.err
+	e.mu.RUnlock()
+
+	return err
+}
+
+// SetError acquires a write-lock and will set
+// the underlying error value if one has not been set.
+func (e *OnceErr) SetError(err error) {
+	if err == nil {
+		return
+	}
+
+	e.mu.Lock()
+	if e.err == nil {
+		e.err = err
+		close(e.ch)
+	}
+	e.mu.Unlock()
+}
+
+// ErrorSet returns a channel that will be used to signal
+// that an error has been set. This channel will be closed
+// when the error value has been set for OnceErr.
+func (e *OnceErr) ErrorSet() <-chan struct{} {
+	return e.ch
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index a34a7577a..3332361bc 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -128,6 +128,7 @@ github.com/aws/aws-sdk-go/aws/endpoints
 # github.com/aws/aws-sdk-go-v2 v1.32.7
 ## explicit; go 1.21
 github.com/aws/aws-sdk-go-v2/aws
+github.com/aws/aws-sdk-go-v2/aws/arn
 github.com/aws/aws-sdk-go-v2/aws/defaults
 github.com/aws/aws-sdk-go-v2/aws/middleware
 github.com/aws/aws-sdk-go-v2/aws/protocol/ec2query
@@ -152,6 +153,10 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults
 github.com/aws/aws-sdk-go-v2/internal/strings
 github.com/aws/aws-sdk-go-v2/internal/sync/singleflight
 github.com/aws/aws-sdk-go-v2/internal/timeconv
+# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2
+## explicit; go 1.20
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi
 # github.com/aws/aws-sdk-go-v2/config v1.27.18
 ## explicit; go 1.20
 github.com/aws/aws-sdk-go-v2/config
@@ -177,6 +182,11 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2
 # github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0
 ## explicit; go 1.20
 github.com/aws/aws-sdk-go-v2/internal/ini
+# github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5
+## explicit; go 1.20
+github.com/aws/aws-sdk-go-v2/internal/v4a
+github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto
+github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4
 # github.com/aws/aws-sdk-go-v2/service/ec2 v1.142.0
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/service/ec2
@@ -195,14 +205,29 @@ github.com/aws/aws-sdk-go-v2/service/iam/types
 # github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2
 ## explicit; go 1.20
 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding
+# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7
+## explicit; go 1.20
+github.com/aws/aws-sdk-go-v2/service/internal/checksum
 # github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11
 ## explicit; go 1.20
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url
+# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5
+## explicit; go 1.20
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config
 # github.com/aws/aws-sdk-go-v2/service/pricing v1.21.6
 ## explicit; go 1.15
 github.com/aws/aws-sdk-go-v2/service/pricing
 github.com/aws/aws-sdk-go-v2/service/pricing/internal/endpoints
 github.com/aws/aws-sdk-go-v2/service/pricing/types
+# github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1
+## explicit; go 1.20
+github.com/aws/aws-sdk-go-v2/service/s3
+github.com/aws/aws-sdk-go-v2/service/s3/internal/arn
+github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations
+github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints
+github.com/aws/aws-sdk-go-v2/service/s3/types
 # github.com/aws/aws-sdk-go-v2/service/sso v1.20.11
 ## explicit; go 1.20
 github.com/aws/aws-sdk-go-v2/service/sso
@@ -223,6 +248,8 @@ github.com/aws/aws-sdk-go-v2/service/sts/types
 github.com/aws/smithy-go
 github.com/aws/smithy-go/auth
 github.com/aws/smithy-go/auth/bearer
+github.com/aws/smithy-go/container/private/cache
+github.com/aws/smithy-go/container/private/cache/lru
 github.com/aws/smithy-go/context
 github.com/aws/smithy-go/document
 github.com/aws/smithy-go/encoding
@@ -230,6 +257,7 @@ github.com/aws/smithy-go/encoding/httpbinding
 github.com/aws/smithy-go/encoding/json
 github.com/aws/smithy-go/encoding/xml
 github.com/aws/smithy-go/endpoints
+github.com/aws/smithy-go/endpoints/private/rulesfn
 github.com/aws/smithy-go/internal/sync/singleflight
 github.com/aws/smithy-go/io
 github.com/aws/smithy-go/logging
@@ -238,6 +266,7 @@ github.com/aws/smithy-go/middleware
 github.com/aws/smithy-go/private/requestcompression
 github.com/aws/smithy-go/ptr
 github.com/aws/smithy-go/rand
+github.com/aws/smithy-go/sync
 github.com/aws/smithy-go/time
 github.com/aws/smithy-go/tracing
 github.com/aws/smithy-go/transport/http
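
The generated validateOp*Input functions vendored above run in each operation's Initialize middleware step, so a request missing a required member fails on the client before credentials are resolved or any bytes are sent. A minimal sketch of how such a failure surfaces to a caller; the bucket name and region are placeholders and the printed message is abbreviated:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// A client with no credentials is enough here: validation rejects the
	// request before signing or any network I/O takes place.
	client := s3.NewFromConfig(aws.Config{Region: "us-east-1"})

	// Key is required by validateOpPutObjectInput, so this never hits the wire.
	_, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
		Bucket: aws.String("example-bucket"),
	})
	// e.g. "operation error S3: PutObject, 1 validation error(s) found.
	//       - missing required field, PutObjectInput.Key."
	fmt.Println(err)
}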
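
The lru package is private to the smithy client runtime and application code should not depend on it, but its eviction policy is easiest to see with a small driver. A sketch assuming only the New/Get/Put API shown in the diff:

package main

import (
	"fmt"

	"github.com/aws/smithy-go/container/private/cache/lru"
)

func main() {
	c := lru.New(2) // room for two entries

	c.Put("a", 1)
	c.Put("b", 2)
	c.Get("a") // moves "a" to the front of the MRU list

	c.Put("c", 3) // at capacity: evicts "b", the least-recently used entry

	_, ok := c.Get("b")
	fmt.Println(ok) // false: "b" was evicted
}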
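
The rulesfn helpers back the generated endpoint-resolution rules for services such as S3, where virtual-host addressing is assembled from host labels and parsed URLs. A sketch of their observable behavior, again treating the private package as an import for illustration only; the inputs are made-up examples:

package main

import (
	"fmt"

	"github.com/aws/smithy-go/endpoints/private/rulesfn"
)

func main() {
	// SubString slices by byte index; reverse counts from the end of the input.
	fmt.Println(*rulesfn.SubString("bucket-name", 0, 6, false)) // "bucket"
	fmt.Println(*rulesfn.SubString("bucket-name", 0, 4, true))  // "name"

	// Non-ASCII input (and out-of-range indexes) yield nil.
	fmt.Println(rulesfn.SubString("héllo", 0, 2, false) == nil) // true

	// Host label validation, with and without nested (dotted) labels.
	fmt.Println(rulesfn.IsValidHostLabel("my-bucket", false))        // true
	fmt.Println(rulesfn.IsValidHostLabel("my.bucket.example", true)) // true

	// ParseURL normalizes the path and flags IP hosts; query strings are rejected.
	u := rulesfn.ParseURL("https://example.com/prefix")
	fmt.Println(u.NormalizedPath, u.IsIp) // "/prefix/" false

	// URIEncode percent-encodes everything outside the RFC 3986 unreserved set.
	fmt.Println(rulesfn.URIEncode("a b/c")) // "a%20b%2Fc"
}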
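
OnceErr gives fan-out goroutines a first-error-wins rendezvous: the first non-nil SetError is recorded and the ErrorSet channel is closed; later calls are no-ops. A small usage sketch of the API shown in the diff:

package main

import (
	"errors"
	"fmt"

	smithysync "github.com/aws/smithy-go/sync"
)

func main() {
	oe := smithysync.NewOnceErr()

	// Several workers may fail; only the first recorded error is kept.
	go oe.SetError(errors.New("worker 1 failed"))
	go oe.SetError(errors.New("worker 2 failed"))

	<-oe.ErrorSet()       // closed once an error has been recorded
	fmt.Println(oe.Err()) // whichever error won the race
}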
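
The vendor/modules.txt entries record what the new dependency pulls in: service/s3 plus its internal helpers (checksum, s3shared, v4a, eventstream) and the new smithy-go runtime packages above. A hedged sketch of standing up the newly vendored S3 client; "example-bucket" is a placeholder, and config.LoadDefaultConfig assumes ambient AWS credentials and region:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.TODO()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	client := s3.NewFromConfig(cfg)

	// HeadBucket is a cheap existence/permission check on a bucket.
	_, err = client.HeadBucket(ctx, &s3.HeadBucketInput{
		Bucket: aws.String("example-bucket"),
	})
	fmt.Println("bucket reachable:", err == nil)
}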