From 964034ab0457b0003693141bde66aaf95d676ab3 Mon Sep 17 00:00:00 2001 From: Toan Nguyen Date: Mon, 6 Jan 2025 23:39:06 +0700 Subject: [PATCH] introduce storage connector --- .dockerignore | 4 + .env | 11 + .github/scripts/plugin-manifest.sh | 10 + .github/scripts/plugin-manifest.yaml | 41 + .github/workflows/lint.yaml | 46 + .github/workflows/release.yaml | 104 + .github/workflows/test.yaml | 55 + .gitignore | 30 + .golangci.yml | 67 + Dockerfile | 26 + LICENSE | 201 ++ Makefile | 59 + README.md | 34 + SECURITY.md | 28 + compose.yaml | 56 + configuration/main.go | 73 + configuration/update.go | 78 + configuration/version/version.go | 44 + configuration/version/version_test.go | 14 + .../.hasura-connector/connector-metadata.yaml | 31 + connector-definition/configuration.yaml | 22 + connector/connector.go | 117 + connector/connector_test.go | 26 + connector/functions/bucket.go | 170 + connector/functions/object.go | 212 ++ connector/functions/types.generated.go | 1172 +++++++ connector/internal/collection_object.go | 53 + .../internal/collection_object_request.go | 288 ++ connector/internal/schema.go | 172 + connector/internal/types.go | 13 + connector/internal/utils.go | 46 + connector/mutation.go | 105 + connector/query.go | 110 + connector/schema.generated.go | 1935 +++++++++++ connector/storage/bucket.go | 253 ++ connector/storage/common/arguments.go | 348 ++ connector/storage/common/storage.go | 594 ++++ connector/storage/common/telemetry.go | 100 + connector/storage/common/types.generated.go | 1279 +++++++ connector/storage/common/types.go | 54 + connector/storage/config.go | 449 +++ connector/storage/manager.go | 125 + connector/storage/minio/bucket.go | 504 +++ connector/storage/minio/client.go | 275 ++ connector/storage/minio/lifecycle.go | 408 +++ connector/storage/minio/object.go | 813 +++++ connector/storage/minio/sse.go | 101 + connector/storage/minio/utils.go | 536 +++ connector/storage/object.go | 368 ++ .../01-createStorageBucket-lock/expected.json | 8 + .../01-createStorageBucket-lock/request.json | 15 + .../01-createStorageBucket-s3/expected.json | 8 + .../01-createStorageBucket-s3/request.json | 15 + .../01-createStorageBucket/expected.json | 8 + .../01-createStorageBucket/request.json | 15 + .../expected.json | 1 + .../request.json | 12 + .../expected.json | 8 + .../02-setStorageBucketLifecycle/request.json | 62 + .../expected.json | 1 + .../request.json | 13 + .../02-setStorageBucketTags/expected.json | 1 + .../02-setStorageBucketTags/request.json | 15 + .../03-uploadStorageObject/expected.json | 22 + .../03-uploadStorageObject/request.json | 96 + .../03-uploadStorageObjectText/expected.json | 22 + .../03-uploadStorageObjectText/request.json | 99 + .../04-composeStorageObject/expected.json | 23 + .../04-composeStorageObject/request.json | 88 + .../04-copyStorageObject/expected.json | 22 + .../04-copyStorageObject/request.json | 91 + .../expected.json | 8 + .../05-putStorageObjectLegalHold/request.json | 14 + .../expected.json | 8 + .../06-putStorageObjectRetention/request.json | 16 + .../07-putStorageObjectTags/expected.json | 1 + .../07-putStorageObjectTags/request.json | 17 + .../expected.json | 8 + .../request.json | 15 + .../query/downloadStorageObject/expected.json | 1 + .../query/downloadStorageObject/request.json | 42 + .../downloadStorageObjectText/expected.json | 1 + .../downloadStorageObjectText/request.json | 42 + .../query/storageBucketExists/expected.json | 9 + .../query/storageBucketExists/request.json | 22 + .../storageBucketLifecycle/expected.json | 44 + 
.../query/storageBucketLifecycle/request.json | 253 ++ .../storageBucketNotification/expected.json | 13 + .../storageBucketNotification/request.json | 221 ++ .../query/storageBucketPolicy/expected.json | 1 + .../query/storageBucketPolicy/request.json | 18 + .../storageBucketReplication/expected.json | 1 + .../storageBucketReplication/request.json | 192 ++ .../query/storageBucketTags/expected.json | 1 + .../query/storageBucketTags/request.json | 18 + .../storageBucketVersioning/expected.json | 14 + .../storageBucketVersioning/request.json | 39 + .../02-get/query/storageBuckets/expected.json | 12 + .../02-get/query/storageBuckets/request.json | 25 + .../storageIncompleteUploads/expected.json | 1 + .../storageIncompleteUploads/request.json | 54 + .../02-get/query/storageObject/expected.json | 44 + .../02-get/query/storageObject/request.json | 194 ++ .../storageObjectAttributes/expected.json | 28 + .../storageObjectAttributes/request.json | 149 + .../storageObjectLegalHold/expected.json | 1 + .../query/storageObjectLegalHold/request.json | 22 + .../storageObjectLockConfig/expected.json | 14 + .../storageObjectLockConfig/request.json | 39 + .../query/storageObjectTags/expected.json | 1 + .../query/storageObjectTags/request.json | 22 + .../storagePresignedDownloadUrl/request.json | 43 + .../storagePresignedHeadUrl/request.json | 47 + .../storagePresignedUploadUrl/request.json | 39 + .../expected.json | 8 + .../request.json | 13 + .../02-removeStorageObject/expected.json | 1 + .../02-removeStorageObject/request.json | 15 + .../03-removeStorageObjectTags/expected.json | 8 + .../03-removeStorageObjectTags/request.json | 13 + .../04-removeStorageObjects/expected.json | 1 + .../04-removeStorageObjects/request.json | 38 + .../expected.json | 8 + .../request.json | 12 + .../06-removeStorageBucketTags/expected.json | 8 + .../06-removeStorageBucketTags/request.json | 12 + .../07-removeStorageBucket/expected.json | 8 + .../07-removeStorageBucket/request.json | 12 + connector/types/configuration.go | 43 + connector/types/connector.go | 12 + docs/configuration.md | 110 + docs/upload-download.md | 119 + go.mod | 68 + go.sum | 147 + jsonschema/configuration.schema.json | 199 ++ jsonschema/generator.go | 40 + scripts/build-manifest.sh | 18 + scripts/test.sh | 37 + server/main.go | 25 + tests/configuration/configuration.yaml | 39 + tests/engine/.gitattributes | 1 + tests/engine/.gitignore | 3 + tests/engine/.hasura/context.yaml | 14 + .../app/metadata/ComposeStorageObject.hml | 206 ++ .../engine/app/metadata/CopyStorageObject.hml | 35 + .../app/metadata/CreateStorageBucket.hml | 33 + .../app/metadata/DownloadStorageObject.hml | 42 + .../metadata/DownloadStorageObjectText.hml | 42 + .../EnableStorageBucketVersioning.hml | 29 + .../metadata/PutStorageObjectLegalHold.hml | 35 + .../metadata/PutStorageObjectRetention.hml | 39 + .../app/metadata/PutStorageObjectTags.hml | 36 + .../RemoveIncompleteStorageUpload.hml | 31 + .../app/metadata/RemoveStorageBucket.hml | 29 + .../RemoveStorageBucketReplication.hml | 28 + .../app/metadata/RemoveStorageBucketTags.hml | 29 + .../app/metadata/RemoveStorageObject.hml | 37 + .../app/metadata/RemoveStorageObjectTags.hml | 33 + .../app/metadata/RemoveStorageObjects.hml | 78 + .../metadata/SetStorageBucketEncryption.hml | 31 + .../metadata/SetStorageBucketLifecycle.hml | 31 + .../metadata/SetStorageBucketNotification.hml | 35 + .../metadata/SetStorageBucketReplication.hml | 35 + .../app/metadata/SetStorageBucketTags.hml | 31 + .../metadata/SetStorageObjectLockConfig.hml | 34 + 
.../app/metadata/StorageBucketEncryption.hml | 115 + .../app/metadata/StorageBucketExists.hml | 29 + .../app/metadata/StorageBucketLifecycle.hml | 440 +++ .../metadata/StorageBucketNotification.hml | 266 ++ .../app/metadata/StorageBucketPolicy.hml | 29 + .../app/metadata/StorageBucketReplication.hml | 339 ++ .../engine/app/metadata/StorageBucketTags.hml | 29 + .../app/metadata/StorageBucketVersioning.hml | 65 + tests/engine/app/metadata/StorageBuckets.hml | 57 + .../app/metadata/StorageIncompleteUploads.hml | 72 + tests/engine/app/metadata/StorageObject.hml | 41 + .../app/metadata/StorageObjectAttributes.hml | 207 ++ .../app/metadata/StorageObjectLegalHold.hml | 33 + .../app/metadata/StorageObjectLockConfig.hml | 64 + .../engine/app/metadata/StorageObjectTags.hml | 33 + tests/engine/app/metadata/StorageObjects.hml | 394 +++ .../metadata/StoragePresignedDownloadUrl.hml | 69 + .../app/metadata/StoragePresignedHeadUrl.hml | 39 + .../metadata/StoragePresignedUploadUrl.hml | 37 + .../SuspendStorageBucketVersioning.hml | 29 + .../app/metadata/UploadStorageObject.hml | 125 + .../app/metadata/UploadStorageObjectText.hml | 36 + tests/engine/app/metadata/storage-types.hml | 806 +++++ tests/engine/app/metadata/storage.hml | 2957 +++++++++++++++++ tests/engine/app/subgraph.yaml | 14 + tests/engine/compose.yaml | 39 + tests/engine/engine/Dockerfile.engine | 2 + tests/engine/globals/metadata/auth-config.hml | 7 + .../globals/metadata/compatibility-config.hml | 2 + .../globals/metadata/graphql-config.hml | 36 + tests/engine/globals/subgraph.yaml | 8 + tests/engine/hasura.yaml | 1 + tests/engine/otel-collector-config.yaml | 30 + tests/engine/supergraph.yaml | 6 + tests/minio/create-bucket.sh | 10 + 200 files changed, 22516 insertions(+) create mode 100644 .dockerignore create mode 100644 .env create mode 100755 .github/scripts/plugin-manifest.sh create mode 100644 .github/scripts/plugin-manifest.yaml create mode 100644 .github/workflows/lint.yaml create mode 100644 .github/workflows/release.yaml create mode 100644 .github/workflows/test.yaml create mode 100644 .gitignore create mode 100644 .golangci.yml create mode 100644 Dockerfile create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.md create mode 100644 SECURITY.md create mode 100644 compose.yaml create mode 100644 configuration/main.go create mode 100644 configuration/update.go create mode 100644 configuration/version/version.go create mode 100644 configuration/version/version_test.go create mode 100644 connector-definition/.hasura-connector/connector-metadata.yaml create mode 100644 connector-definition/configuration.yaml create mode 100644 connector/connector.go create mode 100644 connector/connector_test.go create mode 100644 connector/functions/bucket.go create mode 100644 connector/functions/object.go create mode 100644 connector/functions/types.generated.go create mode 100644 connector/internal/collection_object.go create mode 100644 connector/internal/collection_object_request.go create mode 100644 connector/internal/schema.go create mode 100644 connector/internal/types.go create mode 100644 connector/internal/utils.go create mode 100644 connector/mutation.go create mode 100644 connector/query.go create mode 100644 connector/schema.generated.go create mode 100644 connector/storage/bucket.go create mode 100644 connector/storage/common/arguments.go create mode 100644 connector/storage/common/storage.go create mode 100644 connector/storage/common/telemetry.go create mode 100644 
connector/storage/common/types.generated.go create mode 100644 connector/storage/common/types.go create mode 100644 connector/storage/config.go create mode 100644 connector/storage/manager.go create mode 100644 connector/storage/minio/bucket.go create mode 100644 connector/storage/minio/client.go create mode 100644 connector/storage/minio/lifecycle.go create mode 100644 connector/storage/minio/object.go create mode 100644 connector/storage/minio/sse.go create mode 100644 connector/storage/minio/utils.go create mode 100644 connector/storage/object.go create mode 100644 connector/testdata/01-setup/mutation/01-createStorageBucket-lock/expected.json create mode 100644 connector/testdata/01-setup/mutation/01-createStorageBucket-lock/request.json create mode 100644 connector/testdata/01-setup/mutation/01-createStorageBucket-s3/expected.json create mode 100644 connector/testdata/01-setup/mutation/01-createStorageBucket-s3/request.json create mode 100644 connector/testdata/01-setup/mutation/01-createStorageBucket/expected.json create mode 100644 connector/testdata/01-setup/mutation/01-createStorageBucket/request.json create mode 100644 connector/testdata/01-setup/mutation/02-enableStorageBucketVersioning/expected.json create mode 100644 connector/testdata/01-setup/mutation/02-enableStorageBucketVersioning/request.json create mode 100644 connector/testdata/01-setup/mutation/02-setStorageBucketLifecycle/expected.json create mode 100644 connector/testdata/01-setup/mutation/02-setStorageBucketLifecycle/request.json create mode 100644 connector/testdata/01-setup/mutation/02-setStorageBucketNotification/expected.json create mode 100644 connector/testdata/01-setup/mutation/02-setStorageBucketNotification/request.json create mode 100644 connector/testdata/01-setup/mutation/02-setStorageBucketTags/expected.json create mode 100644 connector/testdata/01-setup/mutation/02-setStorageBucketTags/request.json create mode 100644 connector/testdata/01-setup/mutation/03-uploadStorageObject/expected.json create mode 100644 connector/testdata/01-setup/mutation/03-uploadStorageObject/request.json create mode 100644 connector/testdata/01-setup/mutation/03-uploadStorageObjectText/expected.json create mode 100644 connector/testdata/01-setup/mutation/03-uploadStorageObjectText/request.json create mode 100644 connector/testdata/01-setup/mutation/04-composeStorageObject/expected.json create mode 100644 connector/testdata/01-setup/mutation/04-composeStorageObject/request.json create mode 100644 connector/testdata/01-setup/mutation/04-copyStorageObject/expected.json create mode 100644 connector/testdata/01-setup/mutation/04-copyStorageObject/request.json create mode 100644 connector/testdata/01-setup/mutation/05-putStorageObjectLegalHold/expected.json create mode 100644 connector/testdata/01-setup/mutation/05-putStorageObjectLegalHold/request.json create mode 100644 connector/testdata/01-setup/mutation/06-putStorageObjectRetention/expected.json create mode 100644 connector/testdata/01-setup/mutation/06-putStorageObjectRetention/request.json create mode 100644 connector/testdata/01-setup/mutation/07-putStorageObjectTags/expected.json create mode 100644 connector/testdata/01-setup/mutation/07-putStorageObjectTags/request.json create mode 100644 connector/testdata/01-setup/mutation/08-setStorageObjectLockConfig/expected.json create mode 100644 connector/testdata/01-setup/mutation/08-setStorageObjectLockConfig/request.json create mode 100644 connector/testdata/02-get/query/downloadStorageObject/expected.json create mode 100644 
connector/testdata/02-get/query/downloadStorageObject/request.json create mode 100644 connector/testdata/02-get/query/downloadStorageObjectText/expected.json create mode 100644 connector/testdata/02-get/query/downloadStorageObjectText/request.json create mode 100644 connector/testdata/02-get/query/storageBucketExists/expected.json create mode 100644 connector/testdata/02-get/query/storageBucketExists/request.json create mode 100644 connector/testdata/02-get/query/storageBucketLifecycle/expected.json create mode 100644 connector/testdata/02-get/query/storageBucketLifecycle/request.json create mode 100644 connector/testdata/02-get/query/storageBucketNotification/expected.json create mode 100644 connector/testdata/02-get/query/storageBucketNotification/request.json create mode 100644 connector/testdata/02-get/query/storageBucketPolicy/expected.json create mode 100644 connector/testdata/02-get/query/storageBucketPolicy/request.json create mode 100644 connector/testdata/02-get/query/storageBucketReplication/expected.json create mode 100644 connector/testdata/02-get/query/storageBucketReplication/request.json create mode 100644 connector/testdata/02-get/query/storageBucketTags/expected.json create mode 100644 connector/testdata/02-get/query/storageBucketTags/request.json create mode 100644 connector/testdata/02-get/query/storageBucketVersioning/expected.json create mode 100644 connector/testdata/02-get/query/storageBucketVersioning/request.json create mode 100644 connector/testdata/02-get/query/storageBuckets/expected.json create mode 100644 connector/testdata/02-get/query/storageBuckets/request.json create mode 100644 connector/testdata/02-get/query/storageIncompleteUploads/expected.json create mode 100644 connector/testdata/02-get/query/storageIncompleteUploads/request.json create mode 100644 connector/testdata/02-get/query/storageObject/expected.json create mode 100644 connector/testdata/02-get/query/storageObject/request.json create mode 100644 connector/testdata/02-get/query/storageObjectAttributes/expected.json create mode 100644 connector/testdata/02-get/query/storageObjectAttributes/request.json create mode 100644 connector/testdata/02-get/query/storageObjectLegalHold/expected.json create mode 100644 connector/testdata/02-get/query/storageObjectLegalHold/request.json create mode 100644 connector/testdata/02-get/query/storageObjectLockConfig/expected.json create mode 100644 connector/testdata/02-get/query/storageObjectLockConfig/request.json create mode 100644 connector/testdata/02-get/query/storageObjectTags/expected.json create mode 100644 connector/testdata/02-get/query/storageObjectTags/request.json create mode 100644 connector/testdata/02-get/query/storagePresignedDownloadUrl/request.json create mode 100644 connector/testdata/02-get/query/storagePresignedHeadUrl/request.json create mode 100644 connector/testdata/02-get/query/storagePresignedUploadUrl/request.json create mode 100644 connector/testdata/03-cleanup/mutation/01-removeIncompleteStorageUpload/expected.json create mode 100644 connector/testdata/03-cleanup/mutation/01-removeIncompleteStorageUpload/request.json create mode 100644 connector/testdata/03-cleanup/mutation/02-removeStorageObject/expected.json create mode 100644 connector/testdata/03-cleanup/mutation/02-removeStorageObject/request.json create mode 100644 connector/testdata/03-cleanup/mutation/03-removeStorageObjectTags/expected.json create mode 100644 connector/testdata/03-cleanup/mutation/03-removeStorageObjectTags/request.json create mode 100644 
connector/testdata/03-cleanup/mutation/04-removeStorageObjects/expected.json create mode 100644 connector/testdata/03-cleanup/mutation/04-removeStorageObjects/request.json create mode 100644 connector/testdata/03-cleanup/mutation/05-suspendStorageBucketVersioning/expected.json create mode 100644 connector/testdata/03-cleanup/mutation/05-suspendStorageBucketVersioning/request.json create mode 100644 connector/testdata/03-cleanup/mutation/06-removeStorageBucketTags/expected.json create mode 100644 connector/testdata/03-cleanup/mutation/06-removeStorageBucketTags/request.json create mode 100644 connector/testdata/03-cleanup/mutation/07-removeStorageBucket/expected.json create mode 100644 connector/testdata/03-cleanup/mutation/07-removeStorageBucket/request.json create mode 100644 connector/types/configuration.go create mode 100644 connector/types/connector.go create mode 100644 docs/configuration.md create mode 100644 docs/upload-download.md create mode 100644 go.mod create mode 100644 go.sum create mode 100644 jsonschema/configuration.schema.json create mode 100644 jsonschema/generator.go create mode 100755 scripts/build-manifest.sh create mode 100755 scripts/test.sh create mode 100644 server/main.go create mode 100644 tests/configuration/configuration.yaml create mode 100644 tests/engine/.gitattributes create mode 100644 tests/engine/.gitignore create mode 100644 tests/engine/.hasura/context.yaml create mode 100644 tests/engine/app/metadata/ComposeStorageObject.hml create mode 100644 tests/engine/app/metadata/CopyStorageObject.hml create mode 100644 tests/engine/app/metadata/CreateStorageBucket.hml create mode 100644 tests/engine/app/metadata/DownloadStorageObject.hml create mode 100644 tests/engine/app/metadata/DownloadStorageObjectText.hml create mode 100644 tests/engine/app/metadata/EnableStorageBucketVersioning.hml create mode 100644 tests/engine/app/metadata/PutStorageObjectLegalHold.hml create mode 100644 tests/engine/app/metadata/PutStorageObjectRetention.hml create mode 100644 tests/engine/app/metadata/PutStorageObjectTags.hml create mode 100644 tests/engine/app/metadata/RemoveIncompleteStorageUpload.hml create mode 100644 tests/engine/app/metadata/RemoveStorageBucket.hml create mode 100644 tests/engine/app/metadata/RemoveStorageBucketReplication.hml create mode 100644 tests/engine/app/metadata/RemoveStorageBucketTags.hml create mode 100644 tests/engine/app/metadata/RemoveStorageObject.hml create mode 100644 tests/engine/app/metadata/RemoveStorageObjectTags.hml create mode 100644 tests/engine/app/metadata/RemoveStorageObjects.hml create mode 100644 tests/engine/app/metadata/SetStorageBucketEncryption.hml create mode 100644 tests/engine/app/metadata/SetStorageBucketLifecycle.hml create mode 100644 tests/engine/app/metadata/SetStorageBucketNotification.hml create mode 100644 tests/engine/app/metadata/SetStorageBucketReplication.hml create mode 100644 tests/engine/app/metadata/SetStorageBucketTags.hml create mode 100644 tests/engine/app/metadata/SetStorageObjectLockConfig.hml create mode 100644 tests/engine/app/metadata/StorageBucketEncryption.hml create mode 100644 tests/engine/app/metadata/StorageBucketExists.hml create mode 100644 tests/engine/app/metadata/StorageBucketLifecycle.hml create mode 100644 tests/engine/app/metadata/StorageBucketNotification.hml create mode 100644 tests/engine/app/metadata/StorageBucketPolicy.hml create mode 100644 tests/engine/app/metadata/StorageBucketReplication.hml create mode 100644 tests/engine/app/metadata/StorageBucketTags.hml create mode 100644 
tests/engine/app/metadata/StorageBucketVersioning.hml create mode 100644 tests/engine/app/metadata/StorageBuckets.hml create mode 100644 tests/engine/app/metadata/StorageIncompleteUploads.hml create mode 100644 tests/engine/app/metadata/StorageObject.hml create mode 100644 tests/engine/app/metadata/StorageObjectAttributes.hml create mode 100644 tests/engine/app/metadata/StorageObjectLegalHold.hml create mode 100644 tests/engine/app/metadata/StorageObjectLockConfig.hml create mode 100644 tests/engine/app/metadata/StorageObjectTags.hml create mode 100644 tests/engine/app/metadata/StorageObjects.hml create mode 100644 tests/engine/app/metadata/StoragePresignedDownloadUrl.hml create mode 100644 tests/engine/app/metadata/StoragePresignedHeadUrl.hml create mode 100644 tests/engine/app/metadata/StoragePresignedUploadUrl.hml create mode 100644 tests/engine/app/metadata/SuspendStorageBucketVersioning.hml create mode 100644 tests/engine/app/metadata/UploadStorageObject.hml create mode 100644 tests/engine/app/metadata/UploadStorageObjectText.hml create mode 100644 tests/engine/app/metadata/storage-types.hml create mode 100644 tests/engine/app/metadata/storage.hml create mode 100644 tests/engine/app/subgraph.yaml create mode 100644 tests/engine/compose.yaml create mode 100644 tests/engine/engine/Dockerfile.engine create mode 100644 tests/engine/globals/metadata/auth-config.hml create mode 100644 tests/engine/globals/metadata/compatibility-config.hml create mode 100644 tests/engine/globals/metadata/graphql-config.hml create mode 100644 tests/engine/globals/subgraph.yaml create mode 100644 tests/engine/hasura.yaml create mode 100644 tests/engine/otel-collector-config.yaml create mode 100644 tests/engine/supergraph.yaml create mode 100755 tests/minio/create-bucket.sh diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..4672f19 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,4 @@ +.hasura-connector/ +*.hml +.github/ +assets/ \ No newline at end of file diff --git a/.env b/.env new file mode 100644 index 0000000..1e0c49b --- /dev/null +++ b/.env @@ -0,0 +1,11 @@ +STORAGE_ENDPOINT=http://minio:9000 +ACCESS_KEY_ID=test-key +SECRET_ACCESS_KEY=randomsecret +DEFAULT_BUCKET=default +S3_STORAGE_ENDPOINT=http://s3mock:9090 +S3_ACCESS_KEY_ID=test-key +S3_SECRET_ACCESS_KEY=randomsecret +S3_DEFAULT_BUCKET=bucket1 + +APP_STORAGE_READ_URL="http://local.hasura.dev:8080" +APP_STORAGE_WRITE_URL="http://local.hasura.dev:8080" \ No newline at end of file diff --git a/.github/scripts/plugin-manifest.sh b/.github/scripts/plugin-manifest.sh new file mode 100755 index 0000000..ffa5085 --- /dev/null +++ b/.github/scripts/plugin-manifest.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +export CLI_VERSION=$GITHUB_REF_NAME +export MACOS_AMD64_SHA256=$(sha256sum "_output/ndc-storage-darwin-amd64" | awk '{ print $1 }') +export MACOS_ARM64_SHA256=$(sha256sum "_output/ndc-storage-darwin-arm64" | awk '{ print $1 }') +export LINUX_AMD64_SHA256=$(sha256sum "_output/ndc-storage-linux-amd64" | awk '{ print $1 }') +export LINUX_ARM64_SHA256=$(sha256sum "_output/ndc-storage-linux-arm64" | awk '{ print $1 }') +export WINDOWS_AMD64_SHA256=$(sha256sum "_output/ndc-storage-windows-amd64.exe" | awk '{ print $1 }') + +envsubst < .github/scripts/plugin-manifest.yaml > release/manifest.yaml \ No newline at end of file diff --git a/.github/scripts/plugin-manifest.yaml b/.github/scripts/plugin-manifest.yaml new file mode 100644 index 0000000..e206a61 --- /dev/null +++ b/.github/scripts/plugin-manifest.yaml @@ -0,0 +1,41 @@ +name: ndc-storage 
+version: "${CLI_VERSION}" +shortDescription: "CLI plugin for Hasura Prometheus data connector" +homepage: https://github.com/hasura/ndc-storage +hidden: true +platforms: + - selector: darwin-arm64 + uri: "https://github.com/hasura/ndc-storage/releases/download/${CLI_VERSION}/ndc-storage-darwin-arm64" + sha256: "${MACOS_ARM64_SHA256}" + bin: "ndc-storage" + files: + - from: "./ndc-storage-darwin-arm64" + to: "ndc-storage" + - selector: linux-arm64 + uri: "https://github.com/hasura/ndc-storage/releases/download/${CLI_VERSION}/ndc-storage-linux-arm64" + sha256: "${LINUX_ARM64_SHA256}" + bin: "ndc-storage" + files: + - from: "./ndc-storage-linux-arm64" + to: "ndc-storage" + - selector: darwin-amd64 + uri: "https://github.com/hasura/ndc-storage/releases/download/${CLI_VERSION}/ndc-storage-darwin-amd64" + sha256: "${MACOS_AMD64_SHA256}" + bin: "ndc-storage" + files: + - from: "./ndc-storage-darwin-amd64" + to: "ndc-storage" + - selector: windows-amd64 + uri: "https://github.com/hasura/ndc-storage/releases/download/${CLI_VERSION}/ndc-storage-windows-amd64.exe" + sha256: "${WINDOWS_AMD64_SHA256}" + bin: "ndc-storage.exe" + files: + - from: "./ndc-storage-windows-amd64.exe" + to: "ndc-storage.exe" + - selector: linux-amd64 + uri: "https://github.com/hasura/ndc-storage/releases/download/${CLI_VERSION}/ndc-storage-linux-amd64" + sha256: "${LINUX_AMD64_SHA256}" + bin: "ndc-storage" + files: + - from: "./ndc-storage-linux-amd64" + to: "ndc-storage" diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml new file mode 100644 index 0000000..f693422 --- /dev/null +++ b/.github/workflows/lint.yaml @@ -0,0 +1,46 @@ +name: Lint + +on: + push: + paths: + - "**.go" + - "go.mod" + - "go.sum" + - ".github/workflows/*.yaml" + +env: + GO_VERSION: 1.23 + +jobs: + detect-modules: + runs-on: ubuntu-latest + outputs: + modules: ${{ steps.set-modules.outputs.modules }} + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - id: set-modules + run: echo "modules=$(go list -m -json | jq -s '.' | jq -c '[.[].Dir]')" >> $GITHUB_OUTPUT + + golangci-lint: + needs: detect-modules + runs-on: ubuntu-latest + strategy: + matrix: + modules: ${{ fromJSON(needs.detect-modules.outputs.modules) }} + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: Format + run: | + diff -u <(echo -n) <(gofmt -d -s .) + cd ndc-http-schema && diff -u <(echo -n) <(gofmt -d -s .) 
+      - name: golangci-lint ${{ matrix.modules }}
+        uses: golangci/golangci-lint-action@v6
+        with:
+          args: --timeout=5m
+          working-directory: ${{ matrix.modules }}
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
new file mode 100644
index 0000000..7abb85e
--- /dev/null
+++ b/.github/workflows/release.yaml
@@ -0,0 +1,104 @@
+name: Release container definition
+on:
+  push:
+    tags:
+      - "*"
+
+env:
+  DOCKER_REGISTRY: ghcr.io
+  DOCKER_IMAGE_NAME: hasura/ndc-storage
+
+jobs:
+  tests:
+    uses: ./.github/workflows/test.yaml
+
+  release-image:
+    name: Release ndc-storage image
+    runs-on: ubuntu-latest
+    needs: [tests]
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Log in to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ${{ env.DOCKER_REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Get version from tag
+        id: get-version
+        run: |
+          echo "tagged_version=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
+        shell: bash
+
+      - name: Extract metadata (tags, labels) for Docker
+        id: docker-metadata
+        uses: docker/metadata-action@v5
+        with:
+          images: ${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_IMAGE_NAME }}
+
+      - name: Build and push
+        uses: docker/build-push-action@v6
+        with:
+          push: true
+          tags: ${{ steps.docker-metadata.outputs.tags }}
+          labels: ${{ steps.docker-metadata.outputs.labels }}
+          platforms: linux/amd64,linux/arm64
+          build-args: |
+            VERSION=${{ steps.get-version.outputs.tagged_version }}
+
+  build-cli-binaries:
+    name: build the CLI binaries
+    runs-on: ubuntu-latest
+    needs: [release-image]
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version-file: "go.mod"
+
+      - name: build the CLI
+        run: |
+          VERSION="$GITHUB_REF_NAME" make ci-build-configuration
+          mkdir release
+          .github/scripts/plugin-manifest.sh
+          mv _output/* release
+
+      - uses: actions/upload-artifact@v4
+        with:
+          path: release/manifest.yaml
+          if-no-files-found: error
+          name: plugin-manifest
+
+      - uses: actions/upload-artifact@v4
+        with:
+          path: release/ndc-storage-*
+          if-no-files-found: error
+          name: artifact
+
+      - name: Get version from tag
+        id: get-version
+        run: |
+          echo "tagged_version=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
+        shell: bash
+
+      - name: Build connector definition
+        run: |
+          ./scripts/build-manifest.sh
+        env:
+          VERSION: ${{ steps.get-version.outputs.tagged_version }}
+
+      - name: create a draft release
+        uses: ncipollo/release-action@v1
+        with:
+          draft: true
+          tag: ${{ steps.get-version.outputs.tagged_version }}
+          artifacts: release/*
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
new file mode 100644
index 0000000..1710fd2
--- /dev/null
+++ b/.github/workflows/test.yaml
@@ -0,0 +1,55 @@
+name: Unit tests
+
+on:
+  workflow_call:
+  pull_request:
+  push:
+    branches:
+      - main
+
+jobs:
+  test-go:
+    name: Run unit and integration tests
+    runs-on: ubuntu-latest
+    permissions:
+      pull-requests: write
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version-file: "go.mod"
+      - name: Format
+        run: diff -u <(echo -n) <(gofmt -d -s .)
+      - name: Vet
+        run: go vet ./...
+ - name: Run tests + run: | + ./scripts/test.sh + - name: Go coverage format + if: ${{ github.event_name == 'pull_request' }} + run: | + go get github.com/boumenot/gocover-cobertura + go install github.com/boumenot/gocover-cobertura + gocover-cobertura < coverage.out > coverage.xml + - name: Code Coverage Summary Report + uses: irongut/CodeCoverageSummary@v1.3.0 + if: ${{ github.event_name == 'pull_request' }} + with: + filename: coverage.xml + badge: true + fail_below_min: true + format: markdown + hide_branch_rate: false + hide_complexity: true + indicators: true + output: both + thresholds: "40 70" + - name: Add Coverage PR Comment + uses: marocchino/sticky-pull-request-comment@v2 + if: ${{ github.event_name == 'pull_request' }} + with: + path: code-coverage-results.md + - name: Dump docker logs on failure + if: failure() + uses: jwalton/gh-docker-logs@v2 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f67d28b --- /dev/null +++ b/.gitignore @@ -0,0 +1,30 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Text editor settings +.idea/ + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +tmp/ + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace files +go.work +go.work.sum + +# Release directory +release/ +_output/ \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..91b5a73 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,67 @@ +linters: + enable-all: true + disable: + - err113 + - lll + - exportloopref + - depguard + - godot + - wrapcheck + - varnamelen + - exhaustruct + - ireturn + - gochecknoglobals + - nilnil + - mnd + - recvcheck + +linters-settings: + lll: + line-length: 180 + + funlen: + lines: 100 + statements: 50 + ignore-comments: true + + cyclop: + max-complexity: 20 + skip-tests: true + + dupl: + # Tokens count to trigger issue. + # Default: 150 + threshold: 200 + + nestif: + min-complexity: 10 + + wsl: + strict-append: false + allow-cuddle-declarations: true + + gosec: + excludes: + - G115 + - G306 + + # gocritic: + # disabled-checks: + # - appendAssign + # gocyclo: + # min-complexity: 40 + + revive: + max-open-files: 2048 + rules: + - name: var-naming + disabled: true + + stylecheck: + checks: + - all + - -ST1003 + +issues: + exclude-files: + - ".*_test\\.go$" diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..f9a328b --- /dev/null +++ b/Dockerfile @@ -0,0 +1,26 @@ +# build context at repo root: docker build -f Dockerfile . +FROM golang:1.23 AS builder + +WORKDIR /app + +ARG VERSION +COPY go.mod go.sum ./ +RUN go mod download +COPY . . + +RUN CGO_ENABLED=0 go build \ + -ldflags "-X github.com/hasura/ndc-storage/configuration/version.BuildVersion=${VERSION}" \ + -v -o ndc-cli ./server + +# stage 2: production image +FROM gcr.io/distroless/static-debian12:nonroot + +# Copy the binary to the production image from the builder stage. +COPY --from=builder /app/ndc-cli /ndc-cli + +ENV HASURA_CONFIGURATION_DIRECTORY=/etc/connector + +ENTRYPOINT ["/ndc-cli"] + +# Run the web service on container startup. 
+CMD ["serve"] \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..6ad7afa
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,59 @@
+VERSION ?= $(shell date +"%Y%m%d")
+OUTPUT_DIR := _output
+
+.PHONY: format
+format:
+	gofmt -w -s .
+
+.PHONY: test
+test:
+	go test -v -race -timeout 3m ./...
+
+# Install the golangci-lint tool to run lint locally
+# https://golangci-lint.run/usage/install
+.PHONY: lint
+lint:
+	golangci-lint run --fix
+
+# clean the output directory
+.PHONY: clean
+clean:
+	rm -rf "$(OUTPUT_DIR)"
+
+.PHONY: build-configuration
+build-configuration:
+	CGO_ENABLED=0 go build -o _output/ndc-storage ./configuration
+
+.PHONY: build-jsonschema
+build-jsonschema:
+	cd jsonschema && go run .
+
+# build the configuration tool for all supported platform/arch combinations
+.PHONY: ci-build-configuration
+ci-build-configuration: clean
+	export CGO_ENABLED=0 && \
+	go get github.com/mitchellh/gox && \
+	go run github.com/mitchellh/gox -ldflags '-X github.com/hasura/ndc-storage/configuration/version.BuildVersion=$(VERSION) -s -w -extldflags "-static"' \
+		-osarch="linux/amd64 linux/arm64 darwin/amd64 windows/amd64 darwin/arm64" \
+		-output="$(OUTPUT_DIR)/ndc-storage-{{.OS}}-{{.Arch}}" \
+		./configuration
+
+.PHONY: build-supergraph-test
+build-supergraph-test:
+	docker compose up -d --build
+	cd tests/engine && \
+		ddn connector-link update storage --add-all-resources --subgraph ./app/subgraph.yaml && \
+		ddn supergraph build local
+	docker compose up -d --build engine
+
+.PHONY: generate-api-types
+generate-api-types:
+	hasura-ndc-go update --directories ./connector/functions,./connector/types,./connector/storage --connector-dir ./connector --schema-format go --type-only
+
+.PHONY: generate-test-config
+generate-test-config:
+	go run ./configuration update -d ./tests/configuration --log-level debug
+
+.PHONY: start-ddn
+start-ddn:
+	HASURA_DDN_PAT=$$(ddn auth print-pat) docker compose up -d --build
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..a3547b0
--- /dev/null
+++ b/README.md
@@ -0,0 +1,34 @@
+# Storage Connector
+
+Storage Connector allows you to connect to cloud storage services, giving you an instant GraphQL API on top of your storage data.
+
+This connector is built using the [Go Data Connector SDK](https://github.com/hasura/ndc-sdk-go) and implements the [Data Connector Spec](https://github.com/hasura/ndc-spec).
+
+## Features
+
+### Supported storage services
+
+At the moment, the connector supports S3-compatible storage services.
+
+| Service              | Supported |
+| -------------------- | --------- |
+| AWS S3               | ✅        |
+| MinIO                | ✅        |
+| Google Cloud Storage | ✅        |
+| Cloudflare R2        | ✅        |
+| DigitalOcean Spaces  | ✅        |
+
+## Get Started
+
+Follow the [Quick Start Guide](https://hasura.io/docs/3.0/getting-started/overview/) in Hasura DDN docs. At the `Connect to data` step, choose the `hasura/storage` data connector from the dropdown and follow the interactive prompts to set the required environment variables.
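+
+For a local setup, the required variables look like the following sketch (the values are placeholders; `STORAGE_ENDPOINT` is only needed for S3-compatible services such as MinIO):
+
+```sh
+STORAGE_PROVIDER_TYPE=s3
+STORAGE_ENDPOINT=http://localhost:9000
+DEFAULT_BUCKET=default
+ACCESS_KEY_ID=<access-key>
+SECRET_ACCESS_KEY=<secret-key>
+```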
+
+The connector is built upon the MinIO Go Client SDK, so it supports most of the methods in the [API interface](https://min.io/docs/minio/linux/developers/go/API.html).
+
+## Documentation
+
+- [Configuration](./docs/configuration.md)
+- [Upload/Download Objects](./docs/upload-download.md)
+
+## License
+
+Storage Connector is available under the [Apache License 2.0](./LICENSE).
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..b3ceb0b
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,28 @@
+# Security Policy
+
+## Supported Versions
+
+| Version  | Supported          |
+| -------- | ------------------ |
+| >= 0.1.0 | :white_check_mark: |
+
+## Reporting a Vulnerability
+
+We’re extremely grateful for security researchers and users who report vulnerabilities to the community.
+All reports are thoroughly investigated by a set of community volunteers.
+
+### When Should I Report a Vulnerability?
+
+- You think you have discovered a potential security vulnerability in the Storage connector or related components.
+- You are unsure how a vulnerability affects the connector.
+- You think you discovered a vulnerability in another project that the connector depends on (e.g., Docker).
+- You want to report any other security risk that could potentially harm connector users.
+
+### When Should I NOT Report a Vulnerability?
+
+- You need help applying security-related updates.
+- Your issue is not security-related.
+
+### Security Vulnerability Response
+
+Each report is acknowledged and analyzed by the project's maintainers. Pull requests with fixes are welcome too.
diff --git a/compose.yaml b/compose.yaml
new file mode 100644
index 0000000..18b48e4
--- /dev/null
+++ b/compose.yaml
@@ -0,0 +1,56 @@
+include:
+  - tests/engine/compose.yaml
+services:
+  ndc-storage:
+    build:
+      context: .
+    ports:
+      - 8080:8080
+    volumes:
+      - ./tests/configuration:/etc/connector:ro
+    extra_hosts:
+      - local.hasura.dev=host-gateway
+    environment:
+      STORAGE_ENDPOINT: $STORAGE_ENDPOINT
+      PUBLIC_HOST: localhost:9000
+      DEFAULT_BUCKET: $DEFAULT_BUCKET
+      ACCESS_KEY_ID: $ACCESS_KEY_ID
+      SECRET_ACCESS_KEY: $SECRET_ACCESS_KEY
+      S3_STORAGE_ENDPOINT: $S3_STORAGE_ENDPOINT
+      S3_PUBLIC_HOST: localhost:9010
+      S3_DEFAULT_BUCKET: $S3_DEFAULT_BUCKET
+      S3_ACCESS_KEY_ID: $S3_ACCESS_KEY_ID
+      S3_SECRET_ACCESS_KEY: $S3_SECRET_ACCESS_KEY
+      HASURA_LOG_LEVEL: debug
+      OTEL_EXPORTER_OTLP_ENDPOINT: http://otel-collector:4317
+      OTEL_METRICS_EXPORTER: prometheus
+
+  minio:
+    image: minio/minio:RELEASE.2024-12-18T13-15-44Z
+    ports:
+      - 9000:9000
+      - 9001:9001
+    volumes:
+      - minio_data:/data
+    environment:
+      MINIO_ROOT_USER: $ACCESS_KEY_ID
+      MINIO_ROOT_PASSWORD: $SECRET_ACCESS_KEY
+    command: server /data --console-address ":9001"
+
+  # https://github.com/adobe/S3Mock
+  s3mock:
+    image: adobe/s3mock:latest
+    environment:
+      - debug=true
+      - retainFilesOnExit=true
+      - root=containers3root
+      - initialBuckets=bucket1
+    ports:
+      - 9010:9090
+      - 9111:9191
+    volumes:
+      - s3_data:/containers3root
+
+volumes:
+  minio_data:
+  s3_data:
diff --git a/configuration/main.go b/configuration/main.go
new file mode 100644
index 0000000..bf204b2
--- /dev/null
+++ b/configuration/main.go
@@ -0,0 +1,73 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+	"os"
+	"os/signal"
+	"strings"
+	"time"
+
+	"github.com/alecthomas/kong"
+	"github.com/hasura/ndc-storage/configuration/version"
+	"github.com/lmittmann/tint"
+)
+
+var cli struct {
+	LogLevel string          `default:"info" enum:"debug,info,warn,error,DEBUG,INFO,WARN,ERROR" env:"HASURA_PLUGIN_LOG_LEVEL" help:"Log level."`
+	Update   UpdateArguments `cmd:"" help:"Validate and update the configuration."`
+	Version  struct{}        `cmd:"" help:"Print the CLI version."`
+}
+
+func main() {
+	// Handle SIGINT (CTRL+C) gracefully.
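+	// The returned stop function releases the signal registration; it is
+	// called before each early os.Exit path below.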
+	_, stop := signal.NotifyContext(context.TODO(), os.Interrupt)
+
+	cmd := kong.Parse(&cli, kong.UsageOnError())
+
+	logger, err := initLogger(cli.LogLevel)
+	if err != nil {
+		// Use the default logger here: on error the returned logger is nil.
+		slog.Error(fmt.Sprintf("failed to initialize: %s", err))
+		stop()
+		os.Exit(1)
+	}
+
+	switch cmd.Command() {
+	case "update":
+		start := time.Now()
+
+		slog.Info("updating configuration", slog.String("dir", cli.Update.Dir))
+
+		if err := UpdateConfig(cli.Update.Dir); err != nil {
+			logger.Error(fmt.Sprintf("failed to update configuration: %s", err))
+			stop()
+			os.Exit(1)
+		}
+
+		slog.Info("updated configuration successfully", slog.String("exec_time", time.Since(start).Round(time.Millisecond).String()))
+	case "version":
+		_, _ = fmt.Fprint(os.Stdout, version.BuildVersion)
+	default:
+		logger.Error(fmt.Sprintf("unknown command <%s>", cmd.Command()))
+		stop()
+		os.Exit(1)
+	}
+}
+
+func initLogger(logLevel string) (*slog.Logger, error) {
+	var level slog.Level
+
+	err := level.UnmarshalText([]byte(strings.ToUpper(logLevel)))
+	if err != nil {
+		return nil, err
+	}
+
+	logger := slog.New(tint.NewHandler(os.Stderr, &tint.Options{
+		Level:      level,
+		TimeFormat: "15:04",
+	}))
+	slog.SetDefault(logger)
+
+	return logger, nil
+}
diff --git a/configuration/update.go b/configuration/update.go
new file mode 100644
index 0000000..0da1784
--- /dev/null
+++ b/configuration/update.go
@@ -0,0 +1,78 @@
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/hasura/ndc-sdk-go/utils"
+	"github.com/hasura/ndc-storage/connector/storage"
+	"github.com/hasura/ndc-storage/connector/types"
+	"gopkg.in/yaml.v3"
+)
+
+// UpdateArguments represents the input arguments of the `update` command.
+type UpdateArguments struct {
+	Dir string `default:"." env:"HASURA_PLUGIN_CONNECTOR_CONTEXT_PATH" help:"The directory where the configuration.yaml file is present" short:"d"`
+}
+
+// UpdateConfig validates and updates the configuration.
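+// If no configuration file exists in the directory yet, a default one that
+// references the standard environment variables is generated.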
+func UpdateConfig(dir string) error {
+	configPath := filepath.Join(dir, types.ConfigurationFileName)
+
+	rawBytes, err := os.ReadFile(configPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return writeConfig(configPath, &defaultConfiguration)
+		}
+
+		return err
+	}
+
+	var config types.Configuration
+	if err := yaml.Unmarshal(rawBytes, &config); err != nil {
+		return err
+	}
+
+	return config.Validate()
+}
+
+func writeConfig(filePath string, config *types.Configuration) error {
+	var buf bytes.Buffer
+	writer := bufio.NewWriter(&buf)
+
+	_, _ = writer.WriteString("# yaml-language-server: $schema=https://raw.githubusercontent.com/hasura/ndc-storage/main/jsonschema/configuration.schema.json\n")
+	encoder := yaml.NewEncoder(writer)
+	encoder.SetIndent(2)
+
+	if err := encoder.Encode(config); err != nil {
+		return fmt.Errorf("failed to encode the configuration file: %w", err)
+	}
+
+	writer.Flush()
+
+	return os.WriteFile(filePath, buf.Bytes(), 0o644)
+}
+
+var defaultConfiguration = types.Configuration{
+	Concurrency: types.ConcurrencySettings{
+		Query:    5,
+		Mutation: 1,
+	},
+	Clients: []storage.ClientConfig{
+		{
+			Type: storage.EnvStorageProviderType{
+				EnvString: utils.NewEnvStringVariable("STORAGE_PROVIDER_TYPE"),
+			},
+			Endpoint:      utils.ToPtr(utils.NewEnvStringVariable("STORAGE_ENDPOINT")),
+			DefaultBucket: utils.NewEnvStringVariable("DEFAULT_BUCKET"),
+			Authentication: storage.AuthCredentials{
+				Type:            storage.AuthTypeStatic,
+				AccessKeyID:     utils.ToPtr(utils.NewEnvStringVariable("ACCESS_KEY_ID")),
+				SecretAccessKey: utils.ToPtr(utils.NewEnvStringVariable("SECRET_ACCESS_KEY")),
+			},
+		},
+	},
+}
diff --git a/configuration/version/version.go b/configuration/version/version.go
new file mode 100644
index 0000000..9fa26a5
--- /dev/null
+++ b/configuration/version/version.go
@@ -0,0 +1,44 @@
+// Package version implements CLI version handling.
+package version
+
+import (
+	"runtime/debug"
+)
+
+// DevVersion is the version string for development versions.
+const DevVersion = "latest"
+
+// BuildVersion is the version string with which the CLI is built. Set at
+// build time.
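+// For release builds it is injected via -ldflags, for example:
+//
+//	go build -ldflags "-X github.com/hasura/ndc-storage/configuration/version.BuildVersion=v1.0.0" ./configuration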
+var BuildVersion = ""
+
+func init() { //nolint:gochecknoinits
+	initBuildVersion()
+}
+
+func initBuildVersion() {
+	if BuildVersion != "" {
+		return
+	}
+
+	BuildVersion = DevVersion
+
+	bi, ok := debug.ReadBuildInfo()
+	if !ok {
+		return
+	}
+
+	if bi.Main.Version != "" {
+		BuildVersion = bi.Main.Version
+
+		return
+	}
+
+	for _, s := range bi.Settings {
+		if s.Key == "vcs.revision" && s.Value != "" {
+			BuildVersion = s.Value
+
+			return
+		}
+	}
+}
diff --git a/configuration/version/version_test.go b/configuration/version/version_test.go
new file mode 100644
index 0000000..d577b9b
--- /dev/null
+++ b/configuration/version/version_test.go
@@ -0,0 +1,14 @@
+package version
+
+import "testing"
+
+func TestVersion(t *testing.T) {
+	BuildVersion = "test"
+
+	initBuildVersion()
+
+	if BuildVersion != "test" {
+		t.Errorf("expected BuildVersion == test, got %s", BuildVersion)
+		t.FailNow()
+	}
+}
diff --git a/connector-definition/.hasura-connector/connector-metadata.yaml b/connector-definition/.hasura-connector/connector-metadata.yaml
new file mode 100644
index 0000000..1f9b785
--- /dev/null
+++ b/connector-definition/.hasura-connector/connector-metadata.yaml
@@ -0,0 +1,31 @@
+packagingDefinition:
+  type: PrebuiltDockerImage
+  dockerImage: ghcr.io/hasura/ndc-storage:{{VERSION}}
+documentationPage: https://github.com/hasura/ndc-storage
+supportedEnvironmentVariables:
+  - name: STORAGE_PROVIDER_TYPE
+    description: The storage provider type. Accepts one of s3 and gs
+    required: true
+  - name: ACCESS_KEY_ID
+    description: The access key ID
+    required: true
+  - name: SECRET_ACCESS_KEY
+    description: The secret access key
+    required: true
+  - name: STORAGE_ENDPOINT
+    description: The base endpoint of the storage service. Required if the provider is another S3-compatible service such as MinIO or Cloudflare R2.
+    required: false
+  - name: DEFAULT_BUCKET
+    description: The default bucket to be used.
diff --git a/connector-definition/.hasura-connector/connector-metadata.yaml b/connector-definition/.hasura-connector/connector-metadata.yaml
new file mode 100644
index 0000000..1f9b785
--- /dev/null
+++ b/connector-definition/.hasura-connector/connector-metadata.yaml
@@ -0,0 +1,31 @@
+packagingDefinition:
+  type: PrebuiltDockerImage
+  dockerImage: ghcr.io/hasura/ndc-storage:{{VERSION}}
+documentationPage: https://github.com/hasura/ndc-storage
+supportedEnvironmentVariables:
+  - name: STORAGE_PROVIDER_TYPE
+    description: Storage provider type. Accepts one of s3 and gs.
+    required: true
+  - name: ACCESS_KEY_ID
+    description: The access key ID
+    required: true
+  - name: SECRET_ACCESS_KEY
+    description: The secret access key
+    required: true
+  - name: STORAGE_ENDPOINT
+    description: The base endpoint of the storage service. Required if the provider is another S3-compatible service such as MinIO or Cloudflare R2.
+    required: false
+  - name: DEFAULT_BUCKET
+    description: The default bucket to be used. If not set, the client must specify the bucket in the request arguments.
+    required: false
+commands:
+  update: ndc-storage update
+  upgradeConfiguration: ndc-storage version
+cliPlugin:
+  name: ndc-storage
+  version: "{{VERSION}}"
+dockerComposeWatch:
+  # copy config files into the existing container and restart it
+  - path: ./configuration.yaml
+    target: /etc/connector/configuration.yaml
+    action: sync+restart
diff --git a/connector-definition/configuration.yaml b/connector-definition/configuration.yaml
new file mode 100644
index 0000000..03721b3
--- /dev/null
+++ b/connector-definition/configuration.yaml
@@ -0,0 +1,22 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/hasura/ndc-storage/main/jsonschema/configuration.schema.json
+clients:
+  - type:
+      env: STORAGE_PROVIDER_TYPE
+    endpoint:
+      env: STORAGE_ENDPOINT
+    defaultBucket:
+      env: DEFAULT_BUCKET
+    authentication:
+      type: static
+      accessKeyId:
+        env: ACCESS_KEY_ID
+      secretAccessKey:
+        env: SECRET_ACCESS_KEY
+    region: null
+    defaultPresignedExpiry: 24h
+    maxRetries: 10
+    trailingHeaders: false
+    allowedBuckets: []
+concurrency:
+  query: 5
+  mutation: 1
diff --git a/connector/connector.go b/connector/connector.go
new file mode 100644
index 0000000..f7ca8b0
--- /dev/null
+++ b/connector/connector.go
@@ -0,0 +1,117 @@
+package connector
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"log/slog"
+	"os"
+	"path/filepath"
+
+	"github.com/hasura/ndc-sdk-go/connector"
+	"github.com/hasura/ndc-sdk-go/schema"
+	"github.com/hasura/ndc-sdk-go/utils"
+	"github.com/hasura/ndc-storage/connector/functions"
+	"github.com/hasura/ndc-storage/connector/internal"
+	"github.com/hasura/ndc-storage/connector/storage"
+	"github.com/hasura/ndc-storage/connector/types"
+	"gopkg.in/yaml.v3"
+)
+
+// Connector implements the SDK interface of the NDC specification.
+type Connector struct {
+	capabilities *schema.RawCapabilitiesResponse
+	rawSchema    *schema.RawSchemaResponse
+	config       *types.Configuration
+	apiHandler   functions.DataConnectorHandler
+}
+
+// ParseConfiguration validates the configuration files provided by the user, returning a validated 'Configuration',
+// or returning an error to prevent connector startup.
+func (c *Connector) ParseConfiguration(ctx context.Context, configurationDir string) (*types.Configuration, error) {
+	configBytes, err := os.ReadFile(filepath.Join(configurationDir, types.ConfigurationFileName))
+	if err != nil {
+		return nil, fmt.Errorf("failed to read configuration: %w", err)
+	}
+
+	var config types.Configuration
+	if err := yaml.Unmarshal(configBytes, &config); err != nil {
+		return nil, fmt.Errorf("failed to decode configuration: %w", err)
+	}
+
+	c.config = &config
+
+	connectorCapabilities := schema.CapabilitiesResponse{
+		Version: "0.1.6",
+		Capabilities: schema.Capabilities{
+			Query: schema.QueryCapabilities{
+				Variables:    schema.LeafCapability{},
+				NestedFields: schema.NestedFieldCapabilities{},
+			},
+			Mutation: schema.MutationCapabilities{},
+		},
+	}
+
+	rawCapabilities, err := json.Marshal(connectorCapabilities)
+	if err != nil {
+		return nil, fmt.Errorf("failed to encode capabilities: %w", err)
+	}
+
+	c.capabilities = schema.NewRawCapabilitiesResponseUnsafe(rawCapabilities)
+	c.apiHandler = functions.DataConnectorHandler{}
+
+	return &config, nil
+}
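+
+// For context, a separate main package would hand this Connector to the SDK's
+// start helper; a minimal sketch, assuming ndc-sdk-go's generic Start
+// entrypoint and eliding imports and serve options:
+//
+//	func main() {
+//		if err := connector.Start[types.Configuration, types.State](&Connector{}); err != nil {
+//			panic(err)
+//		}
+//	}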
+
+// TryInitState initializes the connector's in-memory state.
+//
+// For example, any connection pools, prepared queries,
+// or other managed resources would be allocated here.
+//
+// In addition, this function should register any
+// connector-specific metrics with the metrics registry.
+func (c *Connector) TryInitState(ctx context.Context, configuration *types.Configuration, metrics *connector.TelemetryState) (*types.State, error) {
+	logger := connector.GetLogger(ctx)
+
+	manager, err := storage.NewManager(ctx, configuration.Clients, logger)
+	if err != nil {
+		return nil, err
+	}
+
+	connectorSchema, errs := utils.MergeSchemas(GetConnectorSchema(), internal.GetConnectorSchema(manager.GetClientIDs()))
+	for _, err := range errs {
+		slog.Debug(err.Error())
+	}
+
+	schemaBytes, err := json.Marshal(connectorSchema)
+	if err != nil {
+		return nil, fmt.Errorf("failed to encode schema: %w", err)
+	}
+
+	c.rawSchema = schema.NewRawSchemaResponseUnsafe(schemaBytes)
+
+	return &types.State{
+		Storage:        manager,
+		TelemetryState: metrics,
+	}, nil
+}
+
+// HealthCheck checks the health of the connector.
+//
+// For example, this function should check that the connector
+// is able to reach its data source over the network.
+//
+// It should return an error if the check fails.
+func (c *Connector) HealthCheck(ctx context.Context, configuration *types.Configuration, state *types.State) error {
+	return nil
+}
+
+// GetCapabilities gets the connector's capabilities.
+func (c *Connector) GetCapabilities(configuration *types.Configuration) schema.CapabilitiesResponseMarshaler {
+	return c.capabilities
+}
+
+// GetSchema gets the connector's schema.
+func (c *Connector) GetSchema(ctx context.Context, configuration *types.Configuration, _ *types.State) (schema.SchemaResponseMarshaler, error) {
+	return c.rawSchema, nil
+}
diff --git a/connector/connector_test.go b/connector/connector_test.go
new file mode 100644
index 0000000..61b6cfd
--- /dev/null
+++ b/connector/connector_test.go
@@ -0,0 +1,26 @@
+package connector
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/hasura/ndc-sdk-go/ndctest"
+)
+
+func TestConnector(t *testing.T) {
+	t.Setenv("STORAGE_ENDPOINT", "http://localhost:9000")
+	t.Setenv("DEFAULT_BUCKET", "default")
+	t.Setenv("ACCESS_KEY_ID", "test-key")
+	t.Setenv("SECRET_ACCESS_KEY", "randomsecret")
+	t.Setenv("S3_STORAGE_ENDPOINT", "http://localhost:9010")
+	t.Setenv("S3_DEFAULT_BUCKET", "bucket1")
+	t.Setenv("S3_ACCESS_KEY_ID", "test-key")
+	t.Setenv("S3_SECRET_ACCESS_KEY", "randomsecret")
+
+	for _, dir := range []string{"01-setup", "02-get", "03-cleanup"} {
+		ndctest.TestConnector(t, &Connector{}, ndctest.TestConnectorOptions{
+			Configuration: "../tests/configuration",
+			TestDataDir:   filepath.Join("testdata", dir),
+		})
+	}
+}
diff --git a/connector/functions/bucket.go b/connector/functions/bucket.go
new file mode 100644
index 0000000..941cd94
--- /dev/null
+++ b/connector/functions/bucket.go
@@ -0,0 +1,170 @@
+package functions
+
+import (
+	"context"
+
+	"github.com/hasura/ndc-storage/connector/storage/common"
+	"github.com/hasura/ndc-storage/connector/types"
+)
+
+// ProcedureCreateStorageBucket creates a new bucket.
+func ProcedureCreateStorageBucket(ctx context.Context, state *types.State, options *common.MakeStorageBucketOptions) (bool, error) {
+	if err := state.Storage.MakeBucket(ctx, options); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// FunctionStorageBuckets lists all buckets.
+func FunctionStorageBuckets(ctx context.Context, state *types.State, args *common.ListStorageBucketArguments) ([]common.StorageBucketInfo, error) {
+	return state.Storage.ListBuckets(ctx, args)
+}
+
+// FunctionStorageBucketExists checks if a bucket exists.
+func FunctionStorageBucketExists(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (bool, error) {
+	return state.Storage.BucketExists(ctx, args)
+}
+
+// ProcedureRemoveStorageBucket removes a bucket; the bucket must be empty to be removed successfully.
+func ProcedureRemoveStorageBucket(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (bool, error) {
+	if err := state.Storage.RemoveBucket(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// ProcedureSetStorageBucketTags sets tags on a bucket.
+func ProcedureSetStorageBucketTags(ctx context.Context, state *types.State, args *common.SetStorageBucketTaggingArguments) (bool, error) {
+	if err := state.Storage.SetBucketTagging(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// FunctionStorageBucketTags gets the tags of a bucket.
+func FunctionStorageBucketTags(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (map[string]string, error) {
+	return state.Storage.GetBucketTagging(ctx, args)
+}
+
+// ProcedureRemoveStorageBucketTags removes all tags on a bucket.
+func ProcedureRemoveStorageBucketTags(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (bool, error) {
+	if err := state.Storage.RemoveBucketTagging(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// FunctionStorageBucketPolicy gets access permissions on a bucket or a prefix.
+func FunctionStorageBucketPolicy(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (string, error) {
+	return state.Storage.GetBucketPolicy(ctx, args)
+}
+
+// FunctionStorageBucketNotification gets the notification configuration of a bucket.
+func FunctionStorageBucketNotification(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (*common.NotificationConfig, error) {
+	return state.Storage.GetBucketNotification(ctx, args)
+}
+
+// ProcedureSetStorageBucketNotification sets a new notification configuration on a bucket.
+func ProcedureSetStorageBucketNotification(ctx context.Context, state *types.State, args *common.SetBucketNotificationArguments) (bool, error) {
+	if err := state.Storage.SetBucketNotification(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// ProcedureSetStorageBucketLifecycle sets the lifecycle on a bucket or an object prefix.
+func ProcedureSetStorageBucketLifecycle(ctx context.Context, state *types.State, args *common.SetStorageBucketLifecycleArguments) (bool, error) {
+	err := state.Storage.SetBucketLifecycle(ctx, args)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// FunctionStorageBucketLifecycle gets the lifecycle of a bucket or a prefix.
+func FunctionStorageBucketLifecycle(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (*common.BucketLifecycleConfiguration, error) {
+	return state.Storage.GetBucketLifecycle(ctx, args)
+}
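+
+// The procedures above share one shape: a side-effecting storage call surfaces
+// to NDC as a mutation returning a boolean success scalar. A hypothetical
+// helper (not part of this package) makes the pattern explicit:
+//
+//	func execBool(err error) (bool, error) {
+//		if err != nil {
+//			return false, err
+//		}
+//
+//		return true, nil
+//	}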
+
+// ProcedureSetStorageBucketEncryption sets default encryption configuration on a bucket.
+func ProcedureSetStorageBucketEncryption(ctx context.Context, state *types.State, args *common.SetStorageBucketEncryptionArguments) (bool, error) {
+	err := state.Storage.SetBucketEncryption(ctx, args)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// FunctionStorageBucketEncryption gets the default encryption configuration set on a bucket.
+func FunctionStorageBucketEncryption(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (*common.ServerSideEncryptionConfiguration, error) {
+	return state.Storage.GetBucketEncryption(ctx, args)
+}
+
+// ProcedureSetStorageObjectLockConfig sets the object lock configuration on the given bucket. Mode, validity and unit must either all be set or all be nil.
+func ProcedureSetStorageObjectLockConfig(ctx context.Context, state *types.State, args *common.SetStorageObjectLockArguments) (bool, error) {
+	err := state.Storage.SetObjectLockConfig(ctx, args)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// FunctionStorageObjectLockConfig gets the object lock configuration of the given bucket.
+func FunctionStorageObjectLockConfig(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (*common.StorageObjectLockConfig, error) {
+	return state.Storage.GetObjectLockConfig(ctx, args)
+}
+
+// ProcedureEnableStorageBucketVersioning enables bucket versioning support.
+func ProcedureEnableStorageBucketVersioning(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (bool, error) {
+	if err := state.Storage.EnableVersioning(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// ProcedureSuspendStorageBucketVersioning disables bucket versioning support.
+func ProcedureSuspendStorageBucketVersioning(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (bool, error) {
+	if err := state.Storage.SuspendVersioning(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// FunctionStorageBucketVersioning gets the versioning configuration set on a bucket.
+func FunctionStorageBucketVersioning(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (*common.StorageBucketVersioningConfiguration, error) {
+	return state.Storage.GetBucketVersioning(ctx, args)
+}
+
+// ProcedureSetStorageBucketReplication sets the replication configuration on a bucket. The role can be obtained by first defining the replication target on MinIO,
+// which associates the source and destination buckets for replication with the replication endpoint.
+func ProcedureSetStorageBucketReplication(ctx context.Context, state *types.State, args *common.SetStorageBucketReplicationArguments) (bool, error) {
+	if err := state.Storage.SetBucketReplication(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// FunctionStorageBucketReplication gets the current replication config of a bucket.
+func FunctionStorageBucketReplication(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (*common.StorageReplicationConfig, error) {
+	return state.Storage.GetBucketReplication(ctx, args)
+}
+
+// ProcedureRemoveStorageBucketReplication removes the replication configuration of a bucket.
+func ProcedureRemoveStorageBucketReplication(ctx context.Context, state *types.State, args *common.StorageBucketArguments) (bool, error) {
+	if err := state.Storage.RemoveBucketReplication(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
diff --git a/connector/functions/object.go b/connector/functions/object.go
new file mode 100644
index 0000000..8a47a37
--- /dev/null
+++ b/connector/functions/object.go
@@ -0,0 +1,212 @@
+package functions
+
+import (
+	"context"
+	"io"
+
+	"github.com/hasura/ndc-sdk-go/scalar"
+	"github.com/hasura/ndc-sdk-go/schema"
+	"github.com/hasura/ndc-storage/connector/storage/common"
+	"github.com/hasura/ndc-storage/connector/types"
+)
+
+// FunctionStorageIncompleteUploads lists partially uploaded objects in a bucket.
+func FunctionStorageIncompleteUploads(ctx context.Context, state *types.State, args *common.ListIncompleteUploadsArguments) ([]common.StorageObjectMultipartInfo, error) {
+	return state.Storage.ListIncompleteUploads(ctx, args)
+}
+
+// FunctionDownloadStorageObject returns a stream of the object data. Most of the common errors occur when reading the stream.
+func FunctionDownloadStorageObject(ctx context.Context, state *types.State, args *common.GetStorageObjectOptions) (*scalar.Bytes, error) {
+	reader, err := state.Storage.GetObject(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	defer reader.Close()
+
+	data, err := io.ReadAll(reader)
+	if err != nil {
+		return nil, schema.InternalServerError(err.Error(), nil)
+	}
+
+	return scalar.NewBytes(data), nil
+}
+
+// FunctionDownloadStorageObjectText returns the object content in plain text. Use this function only if you know the object is a text file.
+func FunctionDownloadStorageObjectText(ctx context.Context, state *types.State, args *common.GetStorageObjectOptions) (*string, error) {
+	reader, err := state.Storage.GetObject(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+
+	defer reader.Close()
+
+	data, err := io.ReadAll(reader)
+	if err != nil {
+		return nil, schema.InternalServerError(err.Error(), nil)
+	}
+
+	dataStr := string(data)
+
+	return &dataStr, nil
+}
+
+// PutStorageObjectBase64Arguments represents the input arguments of the PutObject method.
+type PutStorageObjectBase64Arguments struct {
+	common.PutStorageObjectArguments
+
+	Data scalar.Bytes `json:"data"`
+}
+
+// ProcedureUploadStorageObject uploads objects that are less than 128MiB in a single PUT operation. For objects that are greater than 128MiB in size,
+// PutObject seamlessly uploads the object as parts of 128MiB or more depending on the actual file size. The max upload size for an object is 5TB.
+func ProcedureUploadStorageObject(ctx context.Context, state *types.State, args *PutStorageObjectBase64Arguments) (common.StorageUploadInfo, error) {
+	result, err := state.Storage.PutObject(ctx, &args.PutStorageObjectArguments, args.Data.Bytes())
+	if err != nil {
+		return common.StorageUploadInfo{}, err
+	}
+
+	return *result, nil
+}
+
+// PutStorageObjectTextArguments represents the input arguments of the PutStorageObjectText method.
+type PutStorageObjectTextArguments struct {
+	common.PutStorageObjectArguments
+
+	Data string `json:"data"`
+}
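+
+// Base64 encodes every 3 raw bytes as 4 output characters, so an encoded
+// payload is about 33% larger than the raw content; equivalently, plain-text
+// uploads are roughly 25% smaller than their base64 form. A quick sketch:
+//
+//	payload := make([]byte, 3*1024)
+//	encoded := base64.StdEncoding.EncodeToString(payload)
+//	fmt.Println(len(payload), len(encoded)) // 3072 4096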
+
+// ProcedureUploadStorageObjectText uploads an object in plain text to the storage server. The file content is not base64-encoded, so the request payload is roughly 25% smaller than its base64 equivalent.
+func ProcedureUploadStorageObjectText(ctx context.Context, state *types.State, args *PutStorageObjectTextArguments) (common.StorageUploadInfo, error) {
+	result, err := state.Storage.PutObject(ctx, &args.PutStorageObjectArguments, []byte(args.Data))
+	if err != nil {
+		return common.StorageUploadInfo{}, err
+	}
+
+	return *result, nil
+}
+
+// ProcedureCopyStorageObject creates or replaces an object through server-side copying of an existing object.
+// It supports conditional copying, copying a part of an object and server-side encryption of destination and decryption of source.
+// To copy multiple source objects into a single destination object see the ComposeObject API.
+func ProcedureCopyStorageObject(ctx context.Context, state *types.State, args *common.CopyStorageObjectArguments) (common.StorageUploadInfo, error) {
+	result, err := state.Storage.CopyObject(ctx, args)
+	if err != nil {
+		return common.StorageUploadInfo{}, err
+	}
+
+	return *result, nil
+}
+
+// ProcedureComposeStorageObject creates an object by concatenating a list of source objects using server-side copying.
+func ProcedureComposeStorageObject(ctx context.Context, state *types.State, args *common.ComposeStorageObjectArguments) (common.StorageUploadInfo, error) {
+	result, err := state.Storage.ComposeObject(ctx, args)
+	if err != nil {
+		return common.StorageUploadInfo{}, err
+	}
+
+	return *result, nil
+}
+
+// FunctionStorageObject fetches the metadata of an object.
+func FunctionStorageObject(ctx context.Context, state *types.State, args *common.GetStorageObjectOptions) (*common.StorageObject, error) {
+	return state.Storage.StatObject(ctx, args)
+}
+
+// ProcedureRemoveStorageObject removes an object with some specified options.
+func ProcedureRemoveStorageObject(ctx context.Context, state *types.State, args *common.RemoveStorageObjectOptions) (bool, error) {
+	if err := state.Storage.RemoveObject(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// ProcedurePutStorageObjectRetention applies an object retention lock onto an object.
+func ProcedurePutStorageObjectRetention(ctx context.Context, state *types.State, args *common.PutStorageObjectRetentionOptions) (bool, error) {
+	if err := state.Storage.PutObjectRetention(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// ProcedureRemoveStorageObjects removes a list of objects. The call sends delete requests to the server in batches of up to 1000 objects at a time;
+// any errors observed are collected and returned in the result list.
+func ProcedureRemoveStorageObjects(ctx context.Context, state *types.State, args *common.RemoveStorageObjectsOptions) ([]common.RemoveStorageObjectError, error) {
+	return state.Storage.RemoveObjects(ctx, args)
+}
+
+// ProcedurePutStorageObjectLegalHold applies a legal hold onto an object.
+func ProcedurePutStorageObjectLegalHold(ctx context.Context, state *types.State, args *common.PutStorageObjectLegalHoldOptions) (bool, error) {
+	if err := state.Storage.PutObjectLegalHold(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// FunctionStorageObjectLegalHold returns the legal hold status of a given object.
+func FunctionStorageObjectLegalHold(ctx context.Context, state *types.State, args *common.GetStorageObjectLegalHoldOptions) (common.StorageLegalHoldStatus, error) {
+	return state.Storage.GetObjectLegalHold(ctx, args)
+}
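+
+// The 1000-object batching described for ProcedureRemoveStorageObjects above
+// can be pictured as plain slice chunking; an illustrative sketch only, since
+// the real batching happens inside the storage manager:
+//
+//	func chunkKeys(keys []string, size int) [][]string {
+//		var batches [][]string
+//		for size < len(keys) {
+//			batches = append(batches, keys[:size])
+//			keys = keys[size:]
+//		}
+//
+//		return append(batches, keys)
+//	}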
+
+// ProcedurePutStorageObjectTags sets new tags on the given object, replacing/overwriting any existing tags.
+func ProcedurePutStorageObjectTags(ctx context.Context, state *types.State, args *common.PutStorageObjectTaggingOptions) (bool, error) {
+	if err := state.Storage.PutObjectTagging(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// FunctionStorageObjectTags fetches the tags of the given object.
+func FunctionStorageObjectTags(ctx context.Context, state *types.State, args *common.StorageObjectTaggingOptions) (map[string]string, error) {
+	return state.Storage.GetObjectTagging(ctx, args)
+}
+
+// ProcedureRemoveStorageObjectTags removes tags from the given object.
+func ProcedureRemoveStorageObjectTags(ctx context.Context, state *types.State, args *common.StorageObjectTaggingOptions) (bool, error) {
+	if err := state.Storage.RemoveObjectTagging(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// FunctionStorageObjectAttributes returns detailed attributes of an object.
+func FunctionStorageObjectAttributes(ctx context.Context, state *types.State, args *common.StorageObjectAttributesOptions) (*common.StorageObjectAttributes, error) {
+	return state.Storage.GetObjectAttributes(ctx, args)
+}
+
+// ProcedureRemoveIncompleteStorageUpload removes a partially uploaded object.
+func ProcedureRemoveIncompleteStorageUpload(ctx context.Context, state *types.State, args *common.RemoveIncompleteUploadArguments) (bool, error) {
+	if err := state.Storage.RemoveIncompleteUpload(ctx, args); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// FunctionStoragePresignedDownloadUrl generates a presigned URL for HTTP GET operations.
+// Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private.
+// This presigned URL can have an associated expiration time in seconds after which it is no longer operational.
+// The maximum expiry is 604800 seconds (i.e. 7 days) and the minimum is 1 second.
+func FunctionStoragePresignedDownloadUrl(ctx context.Context, state *types.State, args *common.PresignedGetStorageObjectArguments) (common.PresignedURLResponse, error) {
+	return state.Storage.PresignedGetObject(ctx, args)
+}
+
+// FunctionStoragePresignedUploadUrl generates a presigned URL for HTTP PUT operations.
+// Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private.
+// This presigned URL can have an associated expiration time in seconds after which it is no longer operational.
+// The default expiry is set to 7 days.
+func FunctionStoragePresignedUploadUrl(ctx context.Context, state *types.State, args *common.PresignedPutStorageObjectArguments) (common.PresignedURLResponse, error) {
+	return state.Storage.PresignedPutObject(ctx, args)
+}
+
+// FunctionStoragePresignedHeadUrl generates a presigned URL for HTTP HEAD operations.
+// Browsers/Mobile clients may point to this URL to directly get metadata from objects even if the bucket is private.
+// This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
+func FunctionStoragePresignedHeadUrl(ctx context.Context, state *types.State, args *common.PresignedGetStorageObjectArguments) (common.PresignedURLResponse, error) { + return state.Storage.PresignedHeadObject(ctx, args) +} diff --git a/connector/functions/types.generated.go b/connector/functions/types.generated.go new file mode 100644 index 0000000..cca3075 --- /dev/null +++ b/connector/functions/types.generated.go @@ -0,0 +1,1172 @@ +// Code generated by github.com/hasura/ndc-sdk-go/cmd/hasura-ndc-go, DO NOT EDIT. +package functions + +import ( + "context" + "encoding/json" + "github.com/hasura/ndc-sdk-go/connector" + "github.com/hasura/ndc-sdk-go/schema" + "github.com/hasura/ndc-sdk-go/utils" + "github.com/hasura/ndc-storage/connector/storage/common" + "github.com/hasura/ndc-storage/connector/types" + "go.opentelemetry.io/otel/trace" + "log/slog" + "slices" +) + +// DataConnectorHandler implements the data connector handler +type DataConnectorHandler struct{} + +// QueryExists check if the query name exists +func (dch DataConnectorHandler) QueryExists(name string) bool { + return slices.Contains(enumValues_FunctionName, name) +} +func (dch DataConnectorHandler) Query(ctx context.Context, state *types.State, request *schema.QueryRequest, rawArgs map[string]any) (*schema.RowSet, error) { + if !dch.QueryExists(request.Collection) { + return nil, utils.ErrHandlerNotfound + } + queryFields, err := utils.EvalFunctionSelectionFieldValue(request) + if err != nil { + return nil, schema.UnprocessableContentError(err.Error(), nil) + } + + result, err := dch.execQuery(context.WithValue(ctx, utils.CommandSelectionFieldKey, queryFields), state, request, queryFields, rawArgs) + if err != nil { + return nil, err + } + + return &schema.RowSet{ + Aggregates: schema.RowSetAggregates{}, + Rows: []map[string]any{ + { + "__value": result, + }, + }, + }, nil +} + +func (dch DataConnectorHandler) execQuery(ctx context.Context, state *types.State, request *schema.QueryRequest, queryFields schema.NestedField, rawArgs map[string]any) (any, error) { + span := trace.SpanFromContext(ctx) + logger := connector.GetLogger(ctx) + switch request.Collection { + case "downloadStorageObject": + + if len(queryFields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.GetStorageObjectOptions + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + return FunctionDownloadStorageObject(ctx, state, &args) + + case "downloadStorageObjectText": + + if len(queryFields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.GetStorageObjectOptions + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + return FunctionDownloadStorageObjectText(ctx, state, &args) + + case "storageBucketEncryption": + + selection, err := queryFields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args 
common.StorageBucketArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + rawResult, err := FunctionStorageBucketEncryption(ctx, state, &args) + + if err != nil { + return nil, err + } + + if rawResult == nil { + return nil, nil + } + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + if err != nil { + return nil, err + } + return result, nil + + case "storageBucketExists": + + if len(queryFields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.StorageBucketArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + return FunctionStorageBucketExists(ctx, state, &args) + + case "storageBucketLifecycle": + + selection, err := queryFields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args common.StorageBucketArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + rawResult, err := FunctionStorageBucketLifecycle(ctx, state, &args) + + if err != nil { + return nil, err + } + + if rawResult == nil { + return nil, nil + } + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + if err != nil { + return nil, err + } + return result, nil + + case "storageBucketNotification": + + selection, err := queryFields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args common.StorageBucketArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + rawResult, err := FunctionStorageBucketNotification(ctx, state, &args) + + if err != nil { + return nil, err + } + + if rawResult == nil { + return nil, nil + } + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + if err != nil { + return nil, err + } + return result, nil + + case "storageBucketPolicy": + + if len(queryFields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.StorageBucketArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, 
schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + return FunctionStorageBucketPolicy(ctx, state, &args) + + case "storageBucketReplication": + + selection, err := queryFields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args common.StorageBucketArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + rawResult, err := FunctionStorageBucketReplication(ctx, state, &args) + + if err != nil { + return nil, err + } + + if rawResult == nil { + return nil, nil + } + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + if err != nil { + return nil, err + } + return result, nil + + case "storageBucketTags": + + if len(queryFields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.StorageBucketArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + return FunctionStorageBucketTags(ctx, state, &args) + + case "storageBucketVersioning": + + selection, err := queryFields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args common.StorageBucketArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + rawResult, err := FunctionStorageBucketVersioning(ctx, state, &args) + + if err != nil { + return nil, err + } + + if rawResult == nil { + return nil, nil + } + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + if err != nil { + return nil, err + } + return result, nil + + case "storageBuckets": + + selection, err := queryFields.AsArray() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be array", map[string]any{ + "cause": err.Error(), + }) + } + var args common.ListStorageBucketArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + rawResult, err := FunctionStorageBuckets(ctx, state, &args) + + if err != nil { + return nil, err + } + + connector_addSpanEvent(span, logger, "evaluate_response_selection", 
map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnArrayIntoSlice(selection, rawResult) + if err != nil { + return nil, err + } + return result, nil + + case "storageIncompleteUploads": + + selection, err := queryFields.AsArray() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be array", map[string]any{ + "cause": err.Error(), + }) + } + var args common.ListIncompleteUploadsArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + rawResult, err := FunctionStorageIncompleteUploads(ctx, state, &args) + + if err != nil { + return nil, err + } + + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnArrayIntoSlice(selection, rawResult) + if err != nil { + return nil, err + } + return result, nil + + case "storageObject": + + selection, err := queryFields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args common.GetStorageObjectOptions + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + rawResult, err := FunctionStorageObject(ctx, state, &args) + + if err != nil { + return nil, err + } + + if rawResult == nil { + return nil, nil + } + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + if err != nil { + return nil, err + } + return result, nil + + case "storageObjectAttributes": + + selection, err := queryFields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args common.StorageObjectAttributesOptions + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + rawResult, err := FunctionStorageObjectAttributes(ctx, state, &args) + + if err != nil { + return nil, err + } + + if rawResult == nil { + return nil, nil + } + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + if err != nil { + return nil, err + } + return result, nil + + case "storageObjectLegalHold": + + if len(queryFields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.GetStorageObjectLegalHoldOptions + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, 
"execute_function", map[string]any{ + "arguments": args, + }) + return FunctionStorageObjectLegalHold(ctx, state, &args) + + case "storageObjectLockConfig": + + selection, err := queryFields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args common.StorageBucketArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + rawResult, err := FunctionStorageObjectLockConfig(ctx, state, &args) + + if err != nil { + return nil, err + } + + if rawResult == nil { + return nil, nil + } + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + if err != nil { + return nil, err + } + return result, nil + + case "storageObjectTags": + + if len(queryFields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.StorageObjectTaggingOptions + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + return FunctionStorageObjectTags(ctx, state, &args) + + case "storagePresignedDownloadUrl": + + selection, err := queryFields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args common.PresignedGetStorageObjectArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + rawResult, err := FunctionStoragePresignedDownloadUrl(ctx, state, &args) + + if err != nil { + return nil, err + } + + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + if err != nil { + return nil, err + } + return result, nil + + case "storagePresignedHeadUrl": + + selection, err := queryFields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args common.PresignedGetStorageObjectArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + rawResult, err := FunctionStoragePresignedHeadUrl(ctx, state, &args) + + if err != nil { + return nil, err + } + + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + if err != nil { + return nil, err + } + 
return result, nil + + case "storagePresignedUploadUrl": + + selection, err := queryFields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args common.PresignedPutStorageObjectArguments + parseErr := args.FromValue(rawArgs) + if parseErr != nil { + return nil, schema.UnprocessableContentError("failed to resolve arguments", map[string]any{ + "cause": parseErr.Error(), + }) + } + + connector_addSpanEvent(span, logger, "execute_function", map[string]any{ + "arguments": args, + }) + rawResult, err := FunctionStoragePresignedUploadUrl(ctx, state, &args) + + if err != nil { + return nil, err + } + + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + if err != nil { + return nil, err + } + return result, nil + + default: + return nil, utils.ErrHandlerNotfound + } +} + +var enumValues_FunctionName = []string{"downloadStorageObject", "downloadStorageObjectText", "storageBucketEncryption", "storageBucketExists", "storageBucketLifecycle", "storageBucketNotification", "storageBucketPolicy", "storageBucketReplication", "storageBucketTags", "storageBucketVersioning", "storageBuckets", "storageIncompleteUploads", "storageObject", "storageObjectAttributes", "storageObjectLegalHold", "storageObjectLockConfig", "storageObjectTags", "storagePresignedDownloadUrl", "storagePresignedHeadUrl", "storagePresignedUploadUrl"} + +// MutationExists check if the mutation name exists +func (dch DataConnectorHandler) MutationExists(name string) bool { + return slices.Contains(enumValues_ProcedureName, name) +} +func (dch DataConnectorHandler) Mutation(ctx context.Context, state *types.State, operation *schema.MutationOperation) (schema.MutationOperationResults, error) { + span := trace.SpanFromContext(ctx) + logger := connector.GetLogger(ctx) + ctx = context.WithValue(ctx, utils.CommandSelectionFieldKey, operation.Fields) + connector_addSpanEvent(span, logger, "validate_request", map[string]any{ + "operations_name": operation.Name, + }) + + switch operation.Name { + case "composeStorageObject": + + selection, err := operation.Fields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args common.ComposeStorageObjectArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + rawResult, err := ProcedureComposeStorageObject(ctx, state, &args) + + if err != nil { + return nil, err + } + + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "copyStorageObject": + + selection, err := operation.Fields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args common.CopyStorageObjectArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to 
decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + rawResult, err := ProcedureCopyStorageObject(ctx, state, &args) + + if err != nil { + return nil, err + } + + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "createStorageBucket": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.MakeStorageBucketOptions + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureCreateStorageBucket(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "enableStorageBucketVersioning": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.StorageBucketArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureEnableStorageBucketVersioning(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "putStorageObjectLegalHold": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.PutStorageObjectLegalHoldOptions + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedurePutStorageObjectLegalHold(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "putStorageObjectRetention": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.PutStorageObjectRetentionOptions + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedurePutStorageObjectRetention(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "putStorageObjectTags": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.PutStorageObjectTaggingOptions + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedurePutStorageObjectTags(ctx, state, &args) + + if err != nil { + return nil, err + } 
+ return schema.NewProcedureResult(result).Encode(), nil + + case "removeIncompleteStorageUpload": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.RemoveIncompleteUploadArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureRemoveIncompleteStorageUpload(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "removeStorageBucket": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.StorageBucketArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureRemoveStorageBucket(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "removeStorageBucketReplication": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.StorageBucketArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureRemoveStorageBucketReplication(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "removeStorageBucketTags": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.StorageBucketArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureRemoveStorageBucketTags(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "removeStorageObject": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.RemoveStorageObjectOptions + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureRemoveStorageObject(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "removeStorageObjectTags": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.StorageObjectTaggingOptions + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": 
err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureRemoveStorageObjectTags(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "removeStorageObjects": + + selection, err := operation.Fields.AsArray() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be array", map[string]any{ + "cause": err.Error(), + }) + } + var args common.RemoveStorageObjectsOptions + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + rawResult, err := ProcedureRemoveStorageObjects(ctx, state, &args) + + if err != nil { + return nil, err + } + + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnArrayIntoSlice(selection, rawResult) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "setStorageBucketEncryption": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.SetStorageBucketEncryptionArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureSetStorageBucketEncryption(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "setStorageBucketLifecycle": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.SetStorageBucketLifecycleArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureSetStorageBucketLifecycle(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "setStorageBucketNotification": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.SetBucketNotificationArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureSetStorageBucketNotification(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "setStorageBucketReplication": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.SetStorageBucketReplicationArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := 
ProcedureSetStorageBucketReplication(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "setStorageBucketTags": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.SetStorageBucketTaggingArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureSetStorageBucketTags(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "setStorageObjectLockConfig": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.SetStorageObjectLockArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureSetStorageObjectLockConfig(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "suspendStorageBucketVersioning": + + if len(operation.Fields) > 0 { + return nil, schema.UnprocessableContentError("cannot evaluate selection fields for scalar", nil) + } + var args common.StorageBucketArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + result, err := ProcedureSuspendStorageBucketVersioning(ctx, state, &args) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "uploadStorageObject": + + selection, err := operation.Fields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args PutStorageObjectBase64Arguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + rawResult, err := ProcedureUploadStorageObject(ctx, state, &args) + + if err != nil { + return nil, err + } + + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + case "uploadStorageObjectText": + + selection, err := operation.Fields.AsObject() + if err != nil { + return nil, schema.UnprocessableContentError("the selection field type must be object", map[string]any{ + "cause": err.Error(), + }) + } + var args PutStorageObjectTextArguments + if err := json.Unmarshal(operation.Arguments, &args); err != nil { + return nil, schema.UnprocessableContentError("failed to decode arguments", map[string]any{ + "cause": err.Error(), + }) + } + span.AddEvent("execute_procedure") + rawResult, err := ProcedureUploadStorageObjectText(ctx, state, &args) + + if err != nil { + 
return nil, err + } + + connector_addSpanEvent(span, logger, "evaluate_response_selection", map[string]any{ + "raw_result": rawResult, + }) + result, err := utils.EvalNestedColumnObject(selection, rawResult) + + if err != nil { + return nil, err + } + return schema.NewProcedureResult(result).Encode(), nil + + default: + return nil, utils.ErrHandlerNotfound + } +} + +var enumValues_ProcedureName = []string{"composeStorageObject", "copyStorageObject", "createStorageBucket", "enableStorageBucketVersioning", "putStorageObjectLegalHold", "putStorageObjectRetention", "putStorageObjectTags", "removeIncompleteStorageUpload", "removeStorageBucket", "removeStorageBucketReplication", "removeStorageBucketTags", "removeStorageObject", "removeStorageObjectTags", "removeStorageObjects", "setStorageBucketEncryption", "setStorageBucketLifecycle", "setStorageBucketNotification", "setStorageBucketReplication", "setStorageBucketTags", "setStorageObjectLockConfig", "suspendStorageBucketVersioning", "uploadStorageObject", "uploadStorageObjectText"} + +func connector_addSpanEvent(span trace.Span, logger *slog.Logger, name string, data map[string]any, options ...trace.EventOption) { + logger.Debug(name, slog.Any("data", data)) + attrs := utils.DebugJSONAttributes(data, utils.IsDebug(logger)) + span.AddEvent(name, append(options, trace.WithAttributes(attrs...))...) +} diff --git a/connector/internal/collection_object.go b/connector/internal/collection_object.go new file mode 100644 index 0000000..9ab0eaa --- /dev/null +++ b/connector/internal/collection_object.go @@ -0,0 +1,53 @@ +package internal + +import ( + "context" + + "github.com/hasura/ndc-sdk-go/schema" + "github.com/hasura/ndc-sdk-go/utils" + "github.com/hasura/ndc-storage/connector/storage" +) + +type CollectionObjectExecutor struct { + Storage *storage.Manager + Request *schema.QueryRequest + Arguments map[string]any + Variables map[string]any +} + +// Execute executes the query request to get the list of storage objects. +func (coe *CollectionObjectExecutor) Execute(ctx context.Context) (*schema.RowSet, error) { + request, err := EvalCollectionObjectRequest(coe.Request, coe.Arguments, coe.Variables) + if err != nil { + return nil, schema.UnprocessableContentError(err.Error(), nil) + } + + if !request.IsValid { + // return zero rows early: + // the evaluated query always returns empty values + return &schema.RowSet{ + Aggregates: schema.RowSetAggregates{}, + Rows: []map[string]any{}, + }, nil + } + + objects, err := coe.Storage.ListObjects(ctx, &request.Options) + if err != nil { + return nil, err + } + + rawResults := make([]map[string]any, len(objects)) + for i, object := range objects { + rawResults[i] = object.ToMap() + } + + result, err := utils.EvalObjectsWithColumnSelection(coe.Request.Query.Fields, rawResults) + if err != nil { + return nil, err + } + + return &schema.RowSet{ + Aggregates: schema.RowSetAggregates{}, + Rows: result, + }, nil +} diff --git a/connector/internal/collection_object_request.go b/connector/internal/collection_object_request.go new file mode 100644 index 0000000..045fc60 --- /dev/null +++ b/connector/internal/collection_object_request.go @@ -0,0 +1,288 @@ +package internal + +import ( + "errors" + "fmt" + "slices" + "strings" + "time" + + "github.com/hasura/ndc-sdk-go/schema" + "github.com/hasura/ndc-sdk-go/utils" + "github.com/hasura/ndc-storage/connector/storage/common" +) + +// CollectionObjectRequest the structured predicate result which is evaluated from the raw expression. 
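+// Supported predicates on clientId, bucket, object name and lastModified are
+// pushed down into ListStorageObjectsOptions. When the expression can never
+// match, IsValid stays false and the executor returns an empty row set.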
+type CollectionObjectRequest struct { + IsValid bool + Options common.ListStorageObjectsOptions + OrderBy []ColumnOrder + + variables map[string]any + objectNamePredicate *StringComparisonOperator +} + +// EvalCollectionObjectRequest evaluates the requested collection data of the query request. +func EvalCollectionObjectRequest(request *schema.QueryRequest, arguments map[string]any, variables map[string]any) (*CollectionObjectRequest, error) { + result := &CollectionObjectRequest{ + variables: variables, + } + + if len(request.Query.Predicate) > 0 { + ok, err := result.evalQueryPredicate(request.Query.Predicate) + if err != nil { + return nil, err + } + + if !ok { + return result, nil + } + } + + if result.objectNamePredicate != nil { + result.Options.Prefix = result.objectNamePredicate.Value + } + + if err := result.evalArguments(arguments); err != nil { + return nil, err + } + + result.evalSelection(request.Query.Fields) + + if request.Query.Limit != nil && *request.Query.Limit > 0 { + result.Options.MaxKeys = *request.Query.Limit + } + + orderBy, err := result.evalOrderBy(request.Query.OrderBy) + if err != nil { + return nil, err + } + + result.OrderBy = orderBy + result.IsValid = true + + return result, nil +} + +func (cor *CollectionObjectRequest) evalSelection(selection schema.QueryFields) { + if _, metadataExists := selection[StorageObjectColumnMetadata]; metadataExists { + cor.Options.WithMetadata = true + } + + if _, metadataExists := selection[StorageObjectColumnUserMetadata]; metadataExists { + cor.Options.WithMetadata = true + } + + if _, versionExists := selection[StorageObjectColumnVersionID]; versionExists { + cor.Options.WithVersions = true + } +} + +func (cor *CollectionObjectRequest) evalArguments(arguments map[string]any) error { + if len(arguments) == 0 { + return nil + } + + if rawRecursive, ok := arguments[StorageObjectArgumentRecursive]; ok { + recursive, err := utils.DecodeNullableBoolean(rawRecursive) + if err != nil { + return fmt.Errorf("%s: %w", StorageObjectArgumentRecursive, err) + } + + if recursive != nil { + cor.Options.Recursive = *recursive + } + } + + return nil +} + +func (cor *CollectionObjectRequest) evalQueryPredicate(expression schema.Expression) (bool, error) { + switch expr := expression.Interface().(type) { + case *schema.ExpressionAnd: + for _, nestedExpr := range expr.Expressions { + ok, err := cor.evalQueryPredicate(nestedExpr) + if err != nil { + return false, err + } + + if !ok { + return false, nil + } + } + + return true, nil + case *schema.ExpressionBinaryComparisonOperator: + if expr.Column.Type != schema.ComparisonTargetTypeColumn { + return false, fmt.Errorf("%s: unsupported comparison target `%s`", expr.Column.Name, expr.Column.Type) + } + + switch expr.Column.Name { + case StorageObjectColumnClientID: + return cor.evalPredicateClientID(expr) + case StorageObjectColumnBucket: + return cor.evalPredicateBucket(expr) + case StorageObjectColumnName: + return cor.evalObjectName(expr) + case StorageObjectColumnLastModified: + switch expr.Operator { + case OperatorGreater: + value, err := getComparisonValueDateTime(expr.Value, cor.variables) + if err != nil { + return false, fmt.Errorf("lastModified: %w", err) + } + + if value == nil { + return true, nil + } + + valueStr := value.Format(time.RFC3339) + if cor.Options.StartAfter == "" { + cor.Options.StartAfter = valueStr + + return true, nil + } + + return cor.Options.StartAfter == valueStr, nil + default: + return false, fmt.Errorf("unsupported operator `%s` for lastModified", 
expr.Operator) + } + default: + return false, errors.New("unsupported predicate on column " + expr.Column.Name) + } + default: + return false, fmt.Errorf("unsupported expression: %+v", expression) + } +} + +func (cor *CollectionObjectRequest) evalPredicateClientID(expr *schema.ExpressionBinaryComparisonOperator) (bool, error) { + switch expr.Operator { + case OperatorEqual: + value, err := getComparisonValueString(expr.Value, cor.variables) + if err != nil { + return false, fmt.Errorf("clientId: %w", err) + } + + if value == nil { + return true, nil + } + + if cor.Options.ClientID == nil || *cor.Options.ClientID == "" { + clientID := common.StorageClientID(*value) + cor.Options.ClientID = &clientID + + return true, nil + } + + return string(*cor.Options.ClientID) == *value, nil + default: + return false, fmt.Errorf("unsupported operator `%s` for clientId", expr.Operator) + } +} + +func (cor *CollectionObjectRequest) evalPredicateBucket(expr *schema.ExpressionBinaryComparisonOperator) (bool, error) { + switch expr.Operator { + case OperatorEqual: + value, err := getComparisonValueString(expr.Value, cor.variables) + if err != nil { + return false, fmt.Errorf("bucket: %w", err) + } + + if value == nil { + return true, nil + } + + if cor.Options.Bucket == "" { + cor.Options.Bucket = *value + + return true, nil + } + + return cor.Options.Bucket == *value, nil + default: + return false, fmt.Errorf("unsupported operator `%s` for bucket", expr.Operator) + } +} + +func (cor *CollectionObjectRequest) evalObjectName(expr *schema.ExpressionBinaryComparisonOperator) (bool, error) { + if !slices.Contains([]string{OperatorStartsWith, OperatorEqual}, expr.Operator) { + return false, fmt.Errorf("unsupported operator `%s` for object name", expr.Operator) + } + + value, err := getComparisonValueString(expr.Value, cor.variables) + if err != nil { + return false, fmt.Errorf("object name: %w", err) + } + + if value == nil { + return true, nil + } + + if cor.objectNamePredicate == nil { + cor.objectNamePredicate = &StringComparisonOperator{ + Value: *value, + Operator: expr.Operator, + } + + return true, nil + } + + switch expr.Operator { + case OperatorStartsWith: + switch cor.objectNamePredicate.Operator { + case OperatorStartsWith: + if len(cor.objectNamePredicate.Value) >= len(*value) { + return strings.HasPrefix(cor.objectNamePredicate.Value, *value), nil + } + + if !strings.HasPrefix(*value, cor.objectNamePredicate.Value) { + return false, nil + } + + cor.objectNamePredicate.Value = *value + case OperatorEqual: + return strings.HasPrefix(cor.objectNamePredicate.Value, *value), nil + } + case OperatorEqual: + switch cor.objectNamePredicate.Operator { + case OperatorStartsWith: + if !strings.HasPrefix(cor.objectNamePredicate.Value, *value) { + return false, nil + } + + cor.objectNamePredicate = &StringComparisonOperator{ + Value: *value, + Operator: OperatorEqual, + } + case OperatorEqual: + return cor.objectNamePredicate.Value == *value, nil + } + } + + return true, nil +} + +func (cor *CollectionObjectRequest) evalOrderBy(orderBy *schema.OrderBy) ([]ColumnOrder, error) { + var results []ColumnOrder + if orderBy == nil { + return results, nil + } + + for _, elem := range orderBy.Elements { + switch target := elem.Target.Interface().(type) { + case *schema.OrderByColumn: + orderBy := ColumnOrder{ + Name: target.Name, + Descending: 
elem.OrderDirection == schema.OrderDirectionDesc, + } + results = append(results, orderBy) + default: + return nil, fmt.Errorf("only ordering by columns is supported, got: %v", elem.Target) + } + } + + return results, nil +} diff --git a/connector/internal/schema.go b/connector/internal/schema.go new file mode 100644 index 0000000..639792a --- /dev/null +++ b/connector/internal/schema.go @@ -0,0 +1,172 @@ +package internal + +import ( + "github.com/hasura/ndc-sdk-go/schema" + "github.com/hasura/ndc-sdk-go/utils" +) + +const ( + CollectionStorageObject = "storageObjects" + StorageObjectName = "StorageObject" + StorageObjectColumnClientID = "clientId" + StorageObjectColumnName = "name" + StorageObjectColumnBucket = "bucket" + StorageObjectColumnLastModified = "lastModified" + StorageObjectColumnMetadata = "metadata" + StorageObjectColumnUserMetadata = "userMetadata" + StorageObjectColumnVersionID = "versionId" + StorageObjectArgumentRecursive = "recursive" +) + +const ( + OperatorEqual = "_eq" + OperatorStartsWith = "_starts_with" + OperatorGreater = "_gt" +) + +const ( + ScalarStorageClientID = "StorageClientID" + ScalarBucketName = "BucketName" + ScalarObjectPath = "ObjectPath" + ScalarFilterTimestamp = "FilterTimestamp" +) + +// GetConnectorSchema returns the connector schema for object collections. +func GetConnectorSchema(clientIDs []string) *schema.SchemaResponse { //nolint:funlen + return &schema.SchemaResponse{ + Collections: []schema.CollectionInfo{ + { + Name: CollectionStorageObject, + Description: utils.ToPtr("The information of a storage object"), + Type: StorageObjectName, + Arguments: schema.CollectionInfoArguments{ + StorageObjectArgumentRecursive: schema.ArgumentInfo{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + }, + UniquenessConstraints: schema.CollectionInfoUniquenessConstraints{}, + ForeignKeys: schema.CollectionInfoForeignKeys{}, + }, + }, + ObjectTypes: schema.SchemaResponseObjectTypes{ + StorageObjectName: schema.ObjectType{ + Fields: schema.ObjectTypeFields{ + StorageObjectColumnClientID: schema.ObjectField{ + Type: schema.NewNamedType(ScalarStorageClientID).Encode(), + }, + StorageObjectColumnBucket: schema.ObjectField{ + Type: schema.NewNamedType(ScalarBucketName).Encode(), + }, + StorageObjectColumnName: schema.ObjectField{ + Type: schema.NewNamedType(ScalarObjectPath).Encode(), + }, + StorageObjectColumnLastModified: schema.ObjectField{ + Type: schema.NewNamedType(ScalarFilterTimestamp).Encode(), + }, + "checksumCrc32": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumCrc32C": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumCrc64Nvme": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumSha1": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumSha256": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "contentType": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "etag": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "expiration": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("TimestampTZ")).Encode(), + }, + "expirationRuleId": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "expires": schema.ObjectField{ + Type: 
schema.NewNamedType("TimestampTZ").Encode(), + }, + "grant": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewArrayType(schema.NewNamedType("StorageGrant"))).Encode(), + }, + "isDeleteMarker": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "isLatest": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + StorageObjectColumnMetadata: schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "owner": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageOwner")).Encode(), + }, + "replicationReady": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "replicationStatus": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "restore": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageRestoreInfo")).Encode(), + }, + "size": schema.ObjectField{ + Type: schema.NewNamedType("Int64").Encode(), + }, + "storageClass": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + StorageObjectColumnUserMetadata: schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "userTagCount": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "userTags": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + StorageObjectColumnVersionID: schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + }, + ScalarTypes: schema.SchemaResponseScalarTypes{ + ScalarBucketName: schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{ + OperatorEqual: schema.NewComparisonOperatorEqual().Encode(), + }, + Representation: schema.NewTypeRepresentationString().Encode(), + }, + ScalarObjectPath: schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{ + OperatorStartsWith: schema.NewComparisonOperatorCustom(schema.NewNamedType(ScalarObjectPath)).Encode(), + }, + Representation: schema.NewTypeRepresentationString().Encode(), + }, + ScalarFilterTimestamp: schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{ + OperatorGreater: schema.NewComparisonOperatorCustom(schema.NewNamedType("TimestampTZ")).Encode(), + }, + Representation: schema.NewTypeRepresentationTimestampTZ().Encode(), + }, + ScalarStorageClientID: schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{ + OperatorEqual: schema.NewComparisonOperatorEqual().Encode(), + }, + Representation: schema.NewTypeRepresentationEnum(clientIDs).Encode(), + }, + }, + } +} diff --git a/connector/internal/types.go b/connector/internal/types.go new file mode 100644 index 0000000..e8b73dc --- /dev/null +++ b/connector/internal/types.go @@ -0,0 +1,13 @@ +package internal + +// ColumnOrder the structured sorting columns. +type ColumnOrder struct { + Name string + Descending bool +} + +// StringComparisonOperator represents the explicit comparison expression for string columns. 
+type StringComparisonOperator struct { + Value string + Operator string +} diff --git a/connector/internal/utils.go b/connector/internal/utils.go new file mode 100644 index 0000000..f5c005d --- /dev/null +++ b/connector/internal/utils.go @@ -0,0 +1,46 @@ +package internal + +import ( + "fmt" + "time" + + "github.com/hasura/ndc-sdk-go/schema" + "github.com/hasura/ndc-sdk-go/utils" +) + +func getComparisonValue(input schema.ComparisonValue, variables map[string]any) (any, error) { + if len(input) == 0 { + return nil, nil + } + + switch v := input.Interface().(type) { + case *schema.ComparisonValueScalar: + return v.Value, nil + case *schema.ComparisonValueVariable: + if value, ok := variables[v.Name]; ok { + return value, nil + } + + return nil, fmt.Errorf("variable %s does not exist", v.Name) + default: + return nil, fmt.Errorf("invalid comparison value: %v", input) + } +} + +func getComparisonValueString(input schema.ComparisonValue, variables map[string]any) (*string, error) { + rawValue, err := getComparisonValue(input, variables) + if err != nil { + return nil, err + } + + return utils.DecodeNullableString(rawValue) +} + +func getComparisonValueDateTime(input schema.ComparisonValue, variables map[string]any) (*time.Time, error) { + rawValue, err := getComparisonValue(input, variables) + if err != nil { + return nil, err + } + + return utils.DecodeNullableDateTime(rawValue) +} diff --git a/connector/mutation.go b/connector/mutation.go new file mode 100644 index 0000000..52b4cee --- /dev/null +++ b/connector/mutation.go @@ -0,0 +1,108 @@ +package connector + +import ( + "context" + "fmt" + + "github.com/hasura/ndc-sdk-go/schema" + "github.com/hasura/ndc-storage/connector/types" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "golang.org/x/sync/errgroup" +) + +// MutationExplain explains a mutation by creating an execution plan. +func (c *Connector) MutationExplain(ctx context.Context, configuration *types.Configuration, state *types.State, request *schema.MutationRequest) (*schema.ExplainResponse, error) { + return nil, schema.NotSupportedError("mutation explain is not supported yet", nil) +} + +// Mutation executes a mutation. 
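+// Operations run sequentially when the request contains at most one operation
+// or the configured mutation concurrency is <= 1; otherwise they run
+// concurrently in an errgroup bounded by that limit.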
+func (c *Connector) Mutation(ctx context.Context, configuration *types.Configuration, state *types.State, request *schema.MutationRequest) (*schema.MutationResponse, error) { + concurrencyLimit := c.config.Concurrency.Mutation + if len(request.Operations) <= 1 || concurrencyLimit <= 1 { + return c.execMutationSync(ctx, state, request) + } + + return c.execMutationAsync(ctx, state, request) +} + +func (c *Connector) execMutationSync(ctx context.Context, state *types.State, request *schema.MutationRequest) (*schema.MutationResponse, error) { + operationResults := make([]schema.MutationOperationResults, len(request.Operations)) + + for i, operation := range request.Operations { + result, err := c.execMutation(ctx, state, operation, i) + if err != nil { + return nil, err + } + + operationResults[i] = result + } + + return &schema.MutationResponse{ + OperationResults: operationResults, + }, nil +} + +func (c *Connector) execMutationAsync(ctx context.Context, state *types.State, request *schema.MutationRequest) (*schema.MutationResponse, error) { + operationResults := make([]schema.MutationOperationResults, len(request.Operations)) + eg, ctx := errgroup.WithContext(ctx) + eg.SetLimit(c.config.Concurrency.Mutation) + + for i, operation := range request.Operations { + func(index int, op schema.MutationOperation) { + eg.Go(func() error { + result, err := c.execMutation(ctx, state, op, index) + if err != nil { + return err + } + + operationResults[index] = result + + return nil + }) + }(i, operation) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + return &schema.MutationResponse{ + OperationResults: operationResults, + }, nil +} + +func (c *Connector) execMutation(ctx context.Context, state *types.State, operation schema.MutationOperation, index int) (schema.MutationOperationResults, error) { + ctx, span := state.Tracer.Start(ctx, fmt.Sprintf("Execute Procedure %d", index)) + defer span.End() + + span.SetAttributes( + attribute.String("operation.type", string(operation.Type)), + attribute.String("operation.name", operation.Name), + ) + + switch operation.Type { + case schema.MutationOperationProcedure: + result, err := c.execProcedure(ctx, state, &operation) + if err != nil { + span.SetStatus(codes.Error, fmt.Sprintf("failed to execute procedure %d", index)) + span.RecordError(err) + + return nil, err + } + + return result, nil + default: + errorMsg := fmt.Sprintf("invalid operation type: %s", operation.Type) + span.SetStatus(codes.Error, errorMsg) + + return nil, schema.UnprocessableContentError(errorMsg, nil) + } +} + +func (c *Connector) execProcedure(ctx context.Context, state *types.State, operation *schema.MutationOperation) (schema.MutationOperationResults, error) { + return c.apiHandler.Mutation(ctx, state, operation) +} diff --git a/connector/query.go b/connector/query.go new file mode 100644 index 0000000..b581385 --- /dev/null +++ b/connector/query.go @@ -0,0 +1,110 @@ +package connector + +import ( + "context" + "fmt" + + "github.com/hasura/ndc-sdk-go/schema" + "github.com/hasura/ndc-sdk-go/utils" + "github.com/hasura/ndc-storage/connector/internal" + "github.com/hasura/ndc-storage/connector/types" + "go.opentelemetry.io/otel/codes" + "golang.org/x/sync/errgroup" +) + +// QueryExplain explains a query by creating an execution plan. 
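+// The storage connector does not build explain plans; this endpoint always
+// returns a NotSupportedError.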
+func (c *Connector) QueryExplain(ctx context.Context, configuration *types.Configuration, state *types.State, request *schema.QueryRequest) (*schema.ExplainResponse, error) { + return nil, schema.NotSupportedError("query explain is not supported yet", nil) +} + +// Query executes a query. +func (c *Connector) Query(ctx context.Context, configuration *types.Configuration, state *types.State, request *schema.QueryRequest) (schema.QueryResponse, error) { + requestVars := request.Variables + if len(requestVars) == 0 { + requestVars = []schema.QueryRequestVariablesElem{make(schema.QueryRequestVariablesElem)} + } + + concurrencyLimit := c.config.Concurrency.Query + if concurrencyLimit <= 1 || len(request.Variables) <= 1 { + return c.execQuerySync(ctx, state, request, requestVars) + } + + return c.execQueryAsync(ctx, state, request, requestVars) +} + +func (c *Connector) execQuerySync(ctx context.Context, state *types.State, req *schema.QueryRequest, requestVars []schema.QueryRequestVariablesElem) (schema.QueryResponse, error) { + rowSets := make([]schema.RowSet, len(requestVars)) + + for i, requestVar := range requestVars { + result, err := c.execQuery(ctx, state, req, requestVar, i) + if err != nil { + return nil, err + } + + rowSets[i] = *result + } + + return rowSets, nil +} + +func (c *Connector) execQueryAsync(ctx context.Context, state *types.State, request *schema.QueryRequest, requestVars []schema.QueryRequestVariablesElem) (schema.QueryResponse, error) { + rowSets := make([]schema.RowSet, len(requestVars)) + eg, ctx := errgroup.WithContext(ctx) + eg.SetLimit(c.config.Concurrency.Query) + + for i, requestVar := range requestVars { + func(index int, vars schema.QueryRequestVariablesElem) { + eg.Go(func() error { + result, err := c.execQuery(ctx, state, request, vars, index) + if err != nil { + return err + } + + rowSets[index] = *result + + return nil + }) + }(i, requestVar) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + return rowSets, nil +} + +func (c *Connector) execQuery(ctx context.Context, state *types.State, request *schema.QueryRequest, variables map[string]any, index int) (*schema.RowSet, error) { + ctx, span := state.Tracer.Start(ctx, fmt.Sprintf("Execute Query %d", index)) + defer span.End() + + rawArgs, err := utils.ResolveArgumentVariables(request.Arguments, variables) + if err != nil { + span.SetStatus(codes.Error, "failed to resolve argument variables") + span.RecordError(err) + + return nil, schema.UnprocessableContentError("failed to resolve argument variables", map[string]any{ + "cause": err.Error(), + }) + } + + if request.Collection == internal.CollectionStorageObject { + executor := internal.CollectionObjectExecutor{ + Storage: state.Storage, + Request: request, + Arguments: rawArgs, + Variables: variables, + } + + return executor.Execute(ctx) + } + + result, err := c.apiHandler.Query(ctx, state, request, rawArgs) + if err != nil { + span.SetStatus(codes.Error, fmt.Sprintf("failed to execute function %d", index)) + span.RecordError(err) + + return nil, err + } + + return result, nil +} diff --git a/connector/schema.generated.go b/connector/schema.generated.go new file mode 100644 index 0000000..6aea8c0 --- /dev/null +++ b/connector/schema.generated.go @@ -0,0 +1,1935 @@ +// Code generated by github.com/hasura/ndc-sdk-go/cmd/hasura-ndc-go, DO NOT EDIT. 
+package connector + +import ( + "github.com/hasura/ndc-sdk-go/schema" +) + +func toPtr[V any](value V) *V { + return &value +} + +// GetConnectorSchema gets the generated connector schema +func GetConnectorSchema() *schema.SchemaResponse { + return &schema.SchemaResponse{ + Collections: []schema.CollectionInfo{}, + ObjectTypes: schema.SchemaResponseObjectTypes{ + "AbortIncompleteMultipartUpload": schema.ObjectType{ + Description: toPtr("structure, not supported yet on MinIO"), + Fields: schema.ObjectTypeFields{ + "daysAfterInitiation": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + }, + }, + "BucketLifecycleConfiguration": schema.ObjectType{ + Description: toPtr("is a collection of lifecycle Rule objects."), + Fields: schema.ObjectTypeFields{ + "rules": schema.ObjectField{ + Type: schema.NewArrayType(schema.NewNamedType("BucketLifecycleRule")).Encode(), + }, + }, + }, + "BucketLifecycleRule": schema.ObjectType{ + Description: toPtr("represents a single rule in lifecycle configuration"), + Fields: schema.ObjectTypeFields{ + "abortIncompleteMultipartUpload": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("AbortIncompleteMultipartUpload")).Encode(), + }, + "allVersionsExpiration": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("LifecycleAllVersionsExpiration")).Encode(), + }, + "delMarkerExpiration": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("LifecycleDelMarkerExpiration")).Encode(), + }, + "expiration": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("LifecycleExpiration")).Encode(), + }, + "filter": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("LifecycleFilter")).Encode(), + }, + "id": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "noncurrentVersionExpiration": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("LifecycleNoncurrentVersionExpiration")).Encode(), + }, + "noncurrentVersionTransition": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("LifecycleNoncurrentVersionTransition")).Encode(), + }, + "prefix": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "status": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "transition": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("LifecycleTransition")).Encode(), + }, + }, + }, + "DeleteMarkerReplication": schema.ObjectType{ + Description: toPtr("whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html"), + Fields: schema.ObjectTypeFields{ + "status": schema.ObjectField{ + Type: schema.NewNamedType("StorageReplicationRuleStatus").Encode(), + }, + }, + }, + "DeleteReplication": schema.ObjectType{ + Description: toPtr("whether versioned deletes are replicated. 
This is a MinIO specific extension"), + Fields: schema.ObjectTypeFields{ + "status": schema.ObjectField{ + Type: schema.NewNamedType("StorageReplicationRuleStatus").Encode(), + }, + }, + }, + "ExistingObjectReplication": schema.ObjectType{ + Description: toPtr("whether existing object replication is enabled"), + Fields: schema.ObjectTypeFields{ + "status": schema.ObjectField{ + Type: schema.NewNamedType("StorageReplicationRuleStatus").Encode(), + }, + }, + }, + "LifecycleAllVersionsExpiration": schema.ObjectType{ + Description: toPtr("represents AllVersionsExpiration actions element in an ILM policy"), + Fields: schema.ObjectTypeFields{ + "days": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "deleteMarker": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + }, + }, + "LifecycleDelMarkerExpiration": schema.ObjectType{ + Description: toPtr("represents DelMarkerExpiration actions element in an ILM policy"), + Fields: schema.ObjectTypeFields{ + "days": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + }, + }, + "LifecycleExpiration": schema.ObjectType{ + Description: toPtr("expiration details of lifecycle configuration"), + Fields: schema.ObjectTypeFields{ + "date": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Date")).Encode(), + }, + "days": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "expiredObjectAllVersions": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "expiredObjectDeleteMarker": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + }, + }, + "LifecycleFilter": schema.ObjectType{ + Description: toPtr("will be used in selecting rule(s) for lifecycle configuration"), + Fields: schema.ObjectTypeFields{ + "and": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("LifecycleFilterAnd")).Encode(), + }, + "objectSizeGreaterThan": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int64")).Encode(), + }, + "objectSizeLessThan": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int64")).Encode(), + }, + "prefix": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "tag": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageTag")).Encode(), + }, + }, + }, + "LifecycleFilterAnd": schema.ObjectType{ + Description: toPtr("the And Rule for LifecycleTag, to be used in LifecycleRuleFilter"), + Fields: schema.ObjectTypeFields{ + "objectSizeGreaterThan": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int64")).Encode(), + }, + "objectSizeLessThan": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int64")).Encode(), + }, + "prefix": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "tags": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewArrayType(schema.NewNamedType("StorageTag"))).Encode(), + }, + }, + }, + "LifecycleNoncurrentVersionExpiration": schema.ObjectType{ + Description: toPtr("- Specifies when noncurrent object versions expire. Upon expiration, server permanently deletes the noncurrent object versions. 
Set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that the server delete noncurrent object versions at a specific period in the object's lifetime."), + Fields: schema.ObjectTypeFields{ + "newerNoncurrentVersions": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "noncurrentDays": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + }, + }, + "LifecycleNoncurrentVersionTransition": schema.ObjectType{ + Description: toPtr("sets this action to request the server to transition noncurrent object versions to different set storage classes at a specific period in the object's lifetime."), + Fields: schema.ObjectTypeFields{ + "newerNoncurrentVersions": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "noncurrentDays": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "storageClass": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "LifecycleTransition": schema.ObjectType{ + Description: toPtr("transition details of lifecycle configuration"), + Fields: schema.ObjectTypeFields{ + "date": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Date")).Encode(), + }, + "days": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "storageClass": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "ListStorageObjectsOptions": schema.ObjectType{ + Description: toPtr("holds all options of a list object request."), + Fields: schema.ObjectTypeFields{ + "bucket": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "maxKeys": schema.ObjectField{ + Type: schema.NewNamedType("Int32").Encode(), + }, + "prefix": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "recursive": schema.ObjectField{ + Type: schema.NewNamedType("Boolean").Encode(), + }, + "startAfter": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "withMetadata": schema.ObjectField{ + Type: schema.NewNamedType("Boolean").Encode(), + }, + "withVersions": schema.ObjectField{ + Type: schema.NewNamedType("Boolean").Encode(), + }, + }, + }, + "NotificationCommonConfig": schema.ObjectType{ + Description: toPtr("- represents one single notification configuration such as topic, queue or lambda configuration."), + Fields: schema.ObjectTypeFields{ + "arn": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "event": schema.ObjectField{ + Type: schema.NewArrayType(schema.NewNamedType("String")).Encode(), + }, + "filter": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("NotificationFilter")).Encode(), + }, + "id": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "NotificationConfig": schema.ObjectType{ + Description: toPtr("the struct that represents a notification configuration object."), + Fields: schema.ObjectTypeFields{ + "cloudFunctionConfigurations": schema.ObjectField{ + Type: schema.NewArrayType(schema.NewNamedType("NotificationLambdaConfig")).Encode(), + }, + "queueConfigurations": schema.ObjectField{ + Type: 
schema.NewArrayType(schema.NewNamedType("NotificationQueueConfig")).Encode(), + }, + "topicConfigurations": schema.ObjectField{ + Type: schema.NewArrayType(schema.NewNamedType("NotificationTopicConfig")).Encode(), + }, + }, + }, + "NotificationFilter": schema.ObjectType{ + Description: toPtr("- a tag in the notification xml structure which carries suffix/prefix filters"), + Fields: schema.ObjectTypeFields{ + "s3Key": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("NotificationS3Key")).Encode(), + }, + }, + }, + "NotificationFilterRule": schema.ObjectType{ + Description: toPtr("child of S3Key, a tag in the notification xml which carries suffix/prefix filters"), + Fields: schema.ObjectTypeFields{ + "name": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "value": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + }, + }, + "NotificationLambdaConfig": schema.ObjectType{ + Description: toPtr("carries one single cloudfunction notification configuration"), + Fields: schema.ObjectTypeFields{ + "arn": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "cloudFunction": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "event": schema.ObjectField{ + Type: schema.NewArrayType(schema.NewNamedType("String")).Encode(), + }, + "filter": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("NotificationFilter")).Encode(), + }, + "id": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "NotificationQueueConfig": schema.ObjectType{ + Description: toPtr("carries one single queue notification configuration"), + Fields: schema.ObjectTypeFields{ + "arn": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "event": schema.ObjectField{ + Type: schema.NewArrayType(schema.NewNamedType("String")).Encode(), + }, + "filter": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("NotificationFilter")).Encode(), + }, + "id": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "queue": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + }, + }, + "NotificationS3Key": schema.ObjectType{ + Description: toPtr("child of Filter, a tag in the notification xml which carries suffix/prefix filters"), + Fields: schema.ObjectTypeFields{ + "filterRule": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewArrayType(schema.NewNamedType("NotificationFilterRule"))).Encode(), + }, + }, + }, + "NotificationTopicConfig": schema.ObjectType{ + Description: toPtr("carries one single topic notification configuration"), + Fields: schema.ObjectTypeFields{ + "arn": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "event": schema.ObjectField{ + Type: schema.NewArrayType(schema.NewNamedType("String")).Encode(), + }, + "filter": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("NotificationFilter")).Encode(), + }, + "id": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "topic": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + }, + }, + "PresignedURLResponse": schema.ObjectType{ + Description: toPtr("holds the presigned URL and expiry information."), + Fields: schema.ObjectTypeFields{ + "expiredAt": schema.ObjectField{ + Type: 
schema.NewNamedType("String").Encode(), + }, + "url": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + }, + }, + "PutStorageObjectArguments": schema.ObjectType{ + Description: toPtr("represents input arguments of the PutObject method."), + Fields: schema.ObjectTypeFields{ + "bucket": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "object": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "options": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("PutStorageObjectOptions")).Encode(), + }, + }, + }, + "PutStorageObjectOptions": schema.ObjectType{ + Description: toPtr("represents options specified by user for PutObject call."), + Fields: schema.ObjectTypeFields{ + "autoChecksum": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("ChecksumType")).Encode(), + }, + "cacheControl": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksum": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("ChecksumType")).Encode(), + }, + "concurrentStreamParts": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "contentDisposition": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "contentEncoding": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "contentLanguage": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "contentType": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "disableContentSha256": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "disableMultipart": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "expires": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("TimestampTZ")).Encode(), + }, + "legalHold": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageLegalHoldStatus")).Encode(), + }, + "mode": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageRetentionMode")).Encode(), + }, + "numThreads": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "partSize": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int64")).Encode(), + }, + "retainUntilDate": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("TimestampTZ")).Encode(), + }, + "sendContentMd5": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "storageClass": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "userMetadata": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "userTags": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "websiteRedirectLocation": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "RemoveStorageObjectError": schema.ObjectType{ + Description: toPtr("the container of Multi Delete S3 API error."), + Fields: 
schema.ObjectTypeFields{ + "error": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "objectName": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "versionId": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + }, + }, + "ReplicaModifications": schema.ObjectType{ + Description: toPtr("specifies if replica modification sync is enabled"), + Fields: schema.ObjectTypeFields{ + "status": schema.ObjectField{ + Type: schema.NewNamedType("StorageReplicationRuleStatus").Encode(), + }, + }, + }, + "ServerSideEncryptionConfiguration": schema.ObjectType{ + Description: toPtr("is the default encryption configuration structure."), + Fields: schema.ObjectTypeFields{ + "rules": schema.ObjectField{ + Type: schema.NewArrayType(schema.NewNamedType("ServerSideEncryptionRule")).Encode(), + }, + }, + }, + "ServerSideEncryptionRule": schema.ObjectType{ + Description: toPtr("rule layer encapsulates default encryption configuration"), + Fields: schema.ObjectTypeFields{ + "apply": schema.ObjectField{ + Type: schema.NewNamedType("StorageApplySSEByDefault").Encode(), + }, + }, + }, + "SetStorageObjectLockConfig": schema.ObjectType{ + Description: toPtr("represents the object lock configuration options in a given bucket"), + Fields: schema.ObjectTypeFields{ + "mode": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageRetentionMode")).Encode(), + }, + "unit": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageRetentionValidityUnit")).Encode(), + }, + "validity": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + }, + }, + "SourceSelectionCriteria": schema.ObjectType{ + Description: toPtr("specifies additional source selection criteria in ReplicationConfiguration."), + Fields: schema.ObjectTypeFields{ + "replicaModifications": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("ReplicaModifications")).Encode(), + }, + }, + }, + "StorageApplySSEByDefault": schema.ObjectType{ + Description: toPtr("defines default encryption configuration, KMS or SSE. To activate KMS, SSEAlgorithm needs to be set to `aws:kms`. 
MinIO currently does not support KMS."), + Fields: schema.ObjectTypeFields{ + "kmsMasterKeyId": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "sseAlgorithm": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + }, + }, + "StorageBucketArguments": schema.ObjectType{ + Description: toPtr("represent the common input arguments for bucket-related methods."), + Fields: schema.ObjectTypeFields{ + "bucket": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + "StorageBucketInfo": schema.ObjectType{ + Description: toPtr("container for bucket metadata."), + Fields: schema.ObjectTypeFields{ + "creationDate": schema.ObjectField{ + Type: schema.NewNamedType("TimestampTZ").Encode(), + }, + "name": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + }, + }, + "StorageBucketVersioningConfiguration": schema.ObjectType{ + Description: toPtr("is the versioning configuration structure"), + Fields: schema.ObjectTypeFields{ + "excludeFolders": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "excludedPrefixes": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewArrayType(schema.NewNamedType("String"))).Encode(), + }, + "mfaDelete": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "status": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "StorageCopyDestOptions": schema.ObjectType{ + Description: toPtr("represents options specified by user for CopyObject/ComposeObject APIs."), + Fields: schema.ObjectTypeFields{ + "bucket": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "legalHold": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageLegalHoldStatus")).Encode(), + }, + "mode": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageRetentionMode")).Encode(), + }, + "object": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "replaceMetadata": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "replaceTags": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "retainUntilDate": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("TimestampTZ")).Encode(), + }, + "size": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int64")).Encode(), + }, + "userMetadata": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "userTags": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + }, + }, + "StorageCopySrcOptions": schema.ObjectType{ + Description: toPtr("represents a source object to be copied, using server-side copying APIs."), + Fields: schema.ObjectTypeFields{ + "bucket": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "end": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int64")).Encode(), + }, + "matchETag": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "matchModifiedSince": schema.ObjectField{ + Type: 
schema.NewNullableType(schema.NewNamedType("TimestampTZ")).Encode(), + }, + "matchRange": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "matchUnmodifiedSince": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("TimestampTZ")).Encode(), + }, + "noMatchETag": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "object": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "start": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int64")).Encode(), + }, + "versionId": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "StorageGrant": schema.ObjectType{ + Description: toPtr("holds grant information."), + Fields: schema.ObjectTypeFields{ + "grantee": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageGrantee")).Encode(), + }, + "permission": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "StorageGrantee": schema.ObjectType{ + Description: toPtr("represents the person being granted permissions."), + Fields: schema.ObjectTypeFields{ + "displayName": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "id": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "uri": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "StorageObject": schema.ObjectType{ + Description: toPtr("container for object metadata."), + Fields: schema.ObjectTypeFields{ + "bucket": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "checksumCrc32": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumCrc32C": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumCrc64Nvme": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumSha1": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumSha256": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "contentType": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "etag": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "expiration": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("TimestampTZ")).Encode(), + }, + "expirationRuleId": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "expires": schema.ObjectField{ + Type: schema.NewNamedType("TimestampTZ").Encode(), + }, + "grant": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewArrayType(schema.NewNamedType("StorageGrant"))).Encode(), + }, + "isDeleteMarker": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "isLatest": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "lastModified": schema.ObjectField{ + Type: schema.NewNamedType("TimestampTZ").Encode(), + }, + "metadata": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "name": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "owner": 
schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageOwner")).Encode(), + }, + "replicationReady": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "replicationStatus": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageObjectReplicationStatus")).Encode(), + }, + "restore": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageRestoreInfo")).Encode(), + }, + "size": schema.ObjectField{ + Type: schema.NewNamedType("Int64").Encode(), + }, + "storageClass": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "userMetadata": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "userTagCount": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "userTags": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "versionId": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "StorageObjectAttributePart": schema.ObjectType{ + Fields: schema.ObjectTypeFields{ + "checksumCrc32": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumCrc32C": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumCrc64Nvme": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumSha1": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumSha256": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "partNumber": schema.ObjectField{ + Type: schema.NewNamedType("Int32").Encode(), + }, + "size": schema.ObjectField{ + Type: schema.NewNamedType("Int32").Encode(), + }, + }, + }, + "StorageObjectAttributes": schema.ObjectType{ + Description: toPtr("is the response object returned by the GetObjectAttributes API."), + Fields: schema.ObjectTypeFields{ + "checksum": schema.ObjectField{ + Type: schema.NewNamedType("StorageObjectChecksum").Encode(), + }, + "etag": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "lastModified": schema.ObjectField{ + Type: schema.NewNamedType("TimestampTZ").Encode(), + }, + "objectParts": schema.ObjectField{ + Type: schema.NewNamedType("StorageObjectParts").Encode(), + }, + "objectSize": schema.ObjectField{ + Type: schema.NewNamedType("Int32").Encode(), + }, + "storageClass": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "versionId": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "StorageObjectAttributesResponse": schema.ObjectType{ + Description: toPtr("contains details returned by the GetObjectAttributes API."), + Fields: schema.ObjectTypeFields{ + "checksum": schema.ObjectField{ + Type: schema.NewNamedType("StorageObjectChecksum").Encode(), + }, + "etag": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "objectParts": schema.ObjectField{ + Type: schema.NewNamedType("StorageObjectParts").Encode(), + }, + "objectSize": schema.ObjectField{ + Type: schema.NewNamedType("Int32").Encode(), + }, + "storageClass": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + }, + }, + 
"StorageObjectChecksum": schema.ObjectType{ + Description: toPtr("represents checksum values of the object."), + Fields: schema.ObjectTypeFields{ + "checksumCrc32": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumCrc32C": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumCrc64Nvme": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumSha1": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumSha256": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "StorageObjectLockConfig": schema.ObjectType{ + Fields: schema.ObjectTypeFields{ + "mode": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageRetentionMode")).Encode(), + }, + "objectLock": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "unit": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageRetentionValidityUnit")).Encode(), + }, + "validity": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + }, + }, + "StorageObjectMultipartInfo": schema.ObjectType{ + Description: toPtr("container for multipart object metadata."), + Fields: schema.ObjectTypeFields{ + "initiated": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("TimestampTZ")).Encode(), + }, + "key": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "size": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("Int64")).Encode(), + }, + "storageClass": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "uploadId": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "StorageObjectParts": schema.ObjectType{ + Fields: schema.ObjectTypeFields{ + "isTruncated": schema.ObjectField{ + Type: schema.NewNamedType("Boolean").Encode(), + }, + "maxParts": schema.ObjectField{ + Type: schema.NewNamedType("Int32").Encode(), + }, + "nextPartNumberMarker": schema.ObjectField{ + Type: schema.NewNamedType("Int32").Encode(), + }, + "partNumberMarker": schema.ObjectField{ + Type: schema.NewNamedType("Int32").Encode(), + }, + "parts": schema.ObjectField{ + Type: schema.NewArrayType(schema.NewNullableType(schema.NewNamedType("StorageObjectAttributePart"))).Encode(), + }, + "partsCount": schema.ObjectField{ + Type: schema.NewNamedType("Int32").Encode(), + }, + }, + }, + "StorageOwner": schema.ObjectType{ + Description: toPtr("name."), + Fields: schema.ObjectTypeFields{ + "id": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "name": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "StorageReplicationConfig": schema.ObjectType{ + Description: toPtr("replication configuration specified in https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html"), + Fields: schema.ObjectTypeFields{ + "role": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "rules": schema.ObjectField{ + Type: schema.NewArrayType(schema.NewNamedType("StorageReplicationRule")).Encode(), + }, + }, + }, + "StorageReplicationDestination": schema.ObjectType{ + Fields: 
schema.ObjectTypeFields{ + "bucket": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "storageClass": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "StorageReplicationFilter": schema.ObjectType{ + Description: toPtr("a filter for a replication configuration Rule."), + Fields: schema.ObjectTypeFields{ + "and": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageReplicationFilterAnd")).Encode(), + }, + "prefix": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "tag": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageTag")).Encode(), + }, + }, + }, + "StorageReplicationFilterAnd": schema.ObjectType{ + Description: toPtr("combines a prefix and multiple tags for a replication configuration rule."), + Fields: schema.ObjectTypeFields{ + "prefix": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "tag": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewArrayType(schema.NewNamedType("StorageTag"))).Encode(), + }, + }, + }, + "StorageReplicationRule": schema.ObjectType{ + Description: toPtr("a rule for replication configuration."), + Fields: schema.ObjectTypeFields{ + "deleteMarkerReplication": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("DeleteMarkerReplication")).Encode(), + }, + "deleteReplication": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("DeleteReplication")).Encode(), + }, + "destination": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("StorageReplicationDestination")).Encode(), + }, + "existingObjectReplication": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("ExistingObjectReplication")).Encode(), + }, + "filter": schema.ObjectField{ + Type: schema.NewNamedType("StorageReplicationFilter").Encode(), + }, + "id": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "priority": schema.ObjectField{ + Type: schema.NewNamedType("Int32").Encode(), + }, + "sourceSelectionCriteria": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("SourceSelectionCriteria")).Encode(), + }, + "status": schema.ObjectField{ + Type: schema.NewNamedType("StorageReplicationRuleStatus").Encode(), + }, + }, + }, + "StorageRestoreInfo": schema.ObjectType{ + Description: toPtr("contains information of the restore operation of an archived object."), + Fields: schema.ObjectTypeFields{ + "expiryTime": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("TimestampTZ")).Encode(), + }, + "ongoingRestore": schema.ObjectField{ + Type: schema.NewNamedType("Boolean").Encode(), + }, + }, + }, + "StorageTag": schema.ObjectType{ + Description: toPtr("a key/value pair representing an object tag to apply in a configuration."), + Fields: schema.ObjectTypeFields{ + "key": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "value": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + "StorageUploadInfo": schema.ObjectType{ + Description: toPtr("represents the information of the uploaded object."), + Fields: schema.ObjectTypeFields{ + "bucket": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "checksumCrc32": schema.ObjectField{ + Type:
schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumCrc32C": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumCrc64Nvme": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumSha1": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksumSha256": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "etag": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "expiration": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("TimestampTZ")).Encode(), + }, + "expirationRuleId": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "lastModified": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("TimestampTZ")).Encode(), + }, + "location": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "name": schema.ObjectField{ + Type: schema.NewNamedType("String").Encode(), + }, + "size": schema.ObjectField{ + Type: schema.NewNamedType("Int64").Encode(), + }, + "versionId": schema.ObjectField{ + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + }, + Functions: []schema.FunctionInfo{ + { + Name: "downloadStorageObject", + Description: toPtr("returns a stream of the object data. Most of the common errors occur when reading the stream."), + ResultType: schema.NewNullableType(schema.NewNamedType("Bytes")).Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksum": { + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "headers": { + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "partNumber": { + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "requestParams": { + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "versionId": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + { + Name: "downloadStorageObjectText", + Description: toPtr("returns the object content in plain text. 
Use this function only if you know the object is a plain text file."), + ResultType: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksum": { + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "headers": { + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "partNumber": { + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "requestParams": { + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "versionId": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + { + Name: "storageBucketEncryption", + Description: toPtr("gets default encryption configuration set on a bucket."), + ResultType: schema.NewNullableType(schema.NewNamedType("ServerSideEncryptionConfiguration")).Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "storageBucketExists", + Description: toPtr("checks if a bucket exists."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "storageBucketLifecycle", + Description: toPtr("gets lifecycle on a bucket or a prefix."), + ResultType: schema.NewNullableType(schema.NewNamedType("BucketLifecycleConfiguration")).Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "storageBucketNotification", + Description: toPtr("gets notification configuration on a bucket."), + ResultType: schema.NewNullableType(schema.NewNamedType("NotificationConfig")).Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "storageBucketPolicy", + Description: toPtr("gets access permissions on a bucket or a prefix."), + ResultType: schema.NewNamedType("String").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "storageBucketReplication", + ResultType: schema.NewNullableType(schema.NewNamedType("StorageReplicationConfig")).Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "storageBucketTags", + Description: toPtr("gets tags
of a bucket."), + ResultType: schema.NewNamedType("JSON").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "storageBucketVersioning", + Description: toPtr("gets versioning configuration set on a bucket."), + ResultType: schema.NewNullableType(schema.NewNamedType("StorageBucketVersioningConfiguration")).Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "storageBuckets", + Description: toPtr("lists all buckets."), + ResultType: schema.NewArrayType(schema.NewNamedType("StorageBucketInfo")).Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "storageIncompleteUploads", + Description: toPtr("lists partially uploaded objects in a bucket."), + ResultType: schema.NewArrayType(schema.NewNamedType("StorageObjectMultipartInfo")).Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "prefix": { + Type: schema.NewNamedType("String").Encode(), + }, + "recursive": { + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + }, + }, + { + Name: "storageObject", + Description: toPtr("fetches metadata of an object."), + ResultType: schema.NewNullableType(schema.NewNamedType("StorageObject")).Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "checksum": { + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "headers": { + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "partNumber": { + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "requestParams": { + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + "versionId": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + { + Name: "storageObjectAttributes", + Description: toPtr("returns detailed attributes of an object, including checksum and parts information."),
+ ResultType: schema.NewNullableType(schema.NewNamedType("StorageObjectAttributes")).Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "maxParts": { + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "partNumberMarker": { + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + "versionId": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + { + Name: "storageObjectLegalHold", + Description: toPtr("returns legal-hold status on a given object."), + ResultType: schema.NewNamedType("StorageLegalHoldStatus").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "versionId": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + { + Name: "storageObjectLockConfig", + Description: toPtr("gets object lock configuration of given bucket."), + ResultType: schema.NewNullableType(schema.NewNamedType("StorageObjectLockConfig")).Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "storageObjectTags", + Description: toPtr("fetches Object Tags from the given object."), + ResultType: schema.NewNamedType("JSON").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "versionId": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + { + Name: "storagePresignedDownloadUrl", + Description: toPtr("generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The maximum expiry is 604800 seconds (i.e. 7 days) and minimum is 1 second."), + ResultType: schema.NewNamedType("PresignedURLResponse").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "expiry": { + Type: schema.NewNullableType(schema.NewNamedType("Duration")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "requestParams": { + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + }, + }, + { + Name: "storagePresignedHeadUrl", + Description: toPtr("generates a presigned URL for HTTP HEAD operations.
Browsers/Mobile clients may point to this URL to directly get metadata from objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days."), + ResultType: schema.NewNamedType("PresignedURLResponse").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "expiry": { + Type: schema.NewNullableType(schema.NewNamedType("Duration")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "requestParams": { + Type: schema.NewNullableType(schema.NewNamedType("JSON")).Encode(), + }, + }, + }, + { + Name: "storagePresignedUploadUrl", + Description: toPtr("generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days."), + ResultType: schema.NewNamedType("PresignedURLResponse").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "expiry": { + Type: schema.NewNullableType(schema.NewNamedType("Duration")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + }, + }, + }, + Procedures: []schema.ProcedureInfo{ + { + Name: "composeStorageObject", + Description: toPtr("creates an object by concatenating a list of source objects using server-side copying."), + ResultType: schema.NewNamedType("StorageUploadInfo").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "dest": { + Type: schema.NewNamedType("StorageCopyDestOptions").Encode(), + }, + "sources": { + Type: schema.NewArrayType(schema.NewNamedType("StorageCopySrcOptions")).Encode(), + }, + }, + }, + { + Name: "copyStorageObject", + Description: toPtr("creates or replaces an object through server-side copying of an existing object. It supports conditional copying, copying a part of an object and server-side encryption of destination and decryption of source. 
To copy multiple source objects into a single destination object see the ComposeObject API."), + ResultType: schema.NewNamedType("StorageUploadInfo").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "dest": { + Type: schema.NewNamedType("StorageCopyDestOptions").Encode(), + }, + "source": { + Type: schema.NewNamedType("StorageCopySrcOptions").Encode(), + }, + }, + }, + { + Name: "createStorageBucket", + Description: toPtr("creates a new bucket."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "name": { + Type: schema.NewNamedType("String").Encode(), + }, + "objectLocking": { + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "region": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + { + Name: "enableStorageBucketVersioning", + Description: toPtr("enables bucket versioning support."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "putStorageObjectLegalHold", + Description: toPtr("applies legal-hold onto an object."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "status": { + Type: schema.NewNullableType(schema.NewNamedType("StorageLegalHoldStatus")).Encode(), + }, + "versionId": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + { + Name: "putStorageObjectRetention", + Description: toPtr("applies object retention lock onto an object."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "governanceBypass": { + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "mode": { + Type: schema.NewNullableType(schema.NewNamedType("StorageRetentionMode")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "retainUntilDate": { + Type: schema.NewNullableType(schema.NewNamedType("TimestampTZ")).Encode(), + }, + "versionId": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + { + Name: "putStorageObjectTags", + Description: toPtr("sets new object Tags to the given object, replaces/overwrites any existing tags."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "tags": { + Type: schema.NewNamedType("JSON").Encode(), 
+ }, + "versionId": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + { + Name: "removeIncompleteStorageUpload", + Description: toPtr("removes a partially uploaded object."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + }, + }, + { + Name: "removeStorageBucket", + Description: toPtr("removes a bucket, bucket should be empty to be successfully removed."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "removeStorageBucketReplication", + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "removeStorageBucketTags", + Description: toPtr("removes all tags on a bucket."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "removeStorageObject", + Description: toPtr("removes an object with some specified options."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "forceDelete": { + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "governanceBypass": { + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "versionId": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + { + Name: "removeStorageObjectTags", + Description: toPtr("removes Object Tags from the given object."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "versionId": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + }, + }, + { + Name: "removeStorageObjects", + Description: toPtr("remove a list of objects obtained from an input channel. The call sends a delete request to the server up to 1000 objects at a time. 
The errors observed are sent over the error channel."), + ResultType: schema.NewArrayType(schema.NewNamedType("RemoveStorageObjectError")).Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "governanceBypass": { + Type: schema.NewNullableType(schema.NewNamedType("Boolean")).Encode(), + }, + "maxKeys": { + Type: schema.NewNamedType("Int32").Encode(), + }, + "prefix": { + Type: schema.NewNamedType("String").Encode(), + }, + "recursive": { + Type: schema.NewNamedType("Boolean").Encode(), + }, + "startAfter": { + Type: schema.NewNamedType("String").Encode(), + }, + "withMetadata": { + Type: schema.NewNamedType("Boolean").Encode(), + }, + "withVersions": { + Type: schema.NewNamedType("Boolean").Encode(), + }, + }, + }, + { + Name: "setStorageBucketEncryption", + Description: toPtr("sets default encryption configuration on a bucket."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "rules": { + Type: schema.NewArrayType(schema.NewNamedType("ServerSideEncryptionRule")).Encode(), + }, + }, + }, + { + Name: "setStorageBucketLifecycle", + Description: toPtr("sets lifecycle on bucket or an object prefix."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "rules": { + Type: schema.NewArrayType(schema.NewNamedType("BucketLifecycleRule")).Encode(), + }, + }, + }, + { + Name: "setStorageBucketNotification", + Description: toPtr("sets a new notification configuration on a bucket."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "cloudFunctionConfigurations": { + Type: schema.NewArrayType(schema.NewNamedType("NotificationLambdaConfig")).Encode(), + }, + "queueConfigurations": { + Type: schema.NewArrayType(schema.NewNamedType("NotificationQueueConfig")).Encode(), + }, + "topicConfigurations": { + Type: schema.NewArrayType(schema.NewNamedType("NotificationTopicConfig")).Encode(), + }, + }, + }, + { + Name: "setStorageBucketReplication", + Description: toPtr("sets replication configuration on a bucket. 
Role can be obtained by first defining the replication target on MinIO to associate the source and destination buckets for replication with the replication endpoint."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "role": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "rules": { + Type: schema.NewArrayType(schema.NewNamedType("StorageReplicationRule")).Encode(), + }, + }, + }, + { + Name: "setStorageBucketTags", + Description: toPtr("sets tags to a bucket."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "tags": { + Type: schema.NewNamedType("JSON").Encode(), + }, + }, + }, + { + Name: "setStorageObjectLockConfig", + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "mode": { + Type: schema.NewNullableType(schema.NewNamedType("StorageRetentionMode")).Encode(), + }, + "unit": { + Type: schema.NewNullableType(schema.NewNamedType("StorageRetentionValidityUnit")).Encode(), + }, + "validity": { + Type: schema.NewNullableType(schema.NewNamedType("Int32")).Encode(), + }, + }, + }, + { + Name: "suspendStorageBucketVersioning", + Description: toPtr("disables bucket versioning support."), + ResultType: schema.NewNamedType("Boolean").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + }, + }, + { + Name: "uploadStorageObject", + Description: toPtr("uploads objects that are less than 128MiB in a single PUT operation. For objects that are greater than 128MiB in size, PutObject seamlessly uploads the object as parts of 128MiB or more depending on the actual file size. The max upload size for an object is 5TB."), + ResultType: schema.NewNamedType("StorageUploadInfo").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "data": { + Type: schema.NewNamedType("Bytes").Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "options": { + Type: schema.NewNullableType(schema.NewNamedType("PutStorageObjectOptions")).Encode(), + }, + }, + }, + { + Name: "uploadStorageObjectText", + Description: toPtr("uploads an object in plain text to the storage server.
The file content is not base64-encoded, so the request body is roughly 30% smaller than the equivalent Bytes upload."), + ResultType: schema.NewNamedType("StorageUploadInfo").Encode(), + Arguments: map[string]schema.ArgumentInfo{ + "bucket": { + Type: schema.NewNullableType(schema.NewNamedType("String")).Encode(), + }, + "clientId": { + Type: schema.NewNullableType(schema.NewNamedType("StorageClientID")).Encode(), + }, + "data": { + Type: schema.NewNamedType("String").Encode(), + }, + "object": { + Type: schema.NewNamedType("String").Encode(), + }, + "options": { + Type: schema.NewNullableType(schema.NewNamedType("PutStorageObjectOptions")).Encode(), + }, + }, + }, + }, + ScalarTypes: schema.SchemaResponseScalarTypes{ + "Boolean": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationBoolean().Encode(), + }, + "Bytes": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationBytes().Encode(), + }, + "ChecksumType": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationEnum([]string{"SHA256", "SHA1", "CRC32", "CRC32C", "CRC64NVME", "FullObjectCRC32", "FullObjectCRC32C", "None"}).Encode(), + }, + "Date": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationDate().Encode(), + }, + "Duration": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationJSON().Encode(), + }, + "Int32": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationInt32().Encode(), + }, + "Int64": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationInt64().Encode(), + }, + "JSON": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationJSON().Encode(), + }, + "StorageClientID": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationString().Encode(), + }, + "StorageLegalHoldStatus": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationEnum([]string{"ON", "OFF"}).Encode(), + }, + "StorageObjectReplicationStatus": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationEnum([]string{"COMPLETED", "PENDING", "FAILED", "REPLICA"}).Encode(), + }, + "StorageReplicationRuleStatus": schema.ScalarType{ +
AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationEnum([]string{"Enabled", "Disabled"}).Encode(), + }, + "StorageRetentionMode": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationEnum([]string{"GOVERNANCE", "COMPLIANCE"}).Encode(), + }, + "StorageRetentionValidityUnit": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationEnum([]string{"DAYS", "YEARS"}).Encode(), + }, + "String": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationString().Encode(), + }, + "TimestampTZ": schema.ScalarType{ + AggregateFunctions: schema.ScalarTypeAggregateFunctions{}, + ComparisonOperators: map[string]schema.ComparisonOperatorDefinition{}, + Representation: schema.NewTypeRepresentationTimestampTZ().Encode(), + }, + }, + } +} diff --git a/connector/storage/bucket.go b/connector/storage/bucket.go new file mode 100644 index 0000000..6253c11 --- /dev/null +++ b/connector/storage/bucket.go @@ -0,0 +1,253 @@ +package storage + +import ( + "context" + + "github.com/hasura/ndc-sdk-go/schema" + "github.com/hasura/ndc-storage/connector/storage/common" +) + +// GetBucketPolicy gets access permissions on a bucket or a prefix. +func (m *Manager) GetBucketPolicy(ctx context.Context, args *common.StorageBucketArguments) (string, error) { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return "", err + } + + return client.GetBucketPolicy(ctx, bucketName) +} + +// GetBucketNotification gets notification configuration on a bucket. +func (m *Manager) GetBucketNotification(ctx context.Context, args *common.StorageBucketArguments) (*common.NotificationConfig, error) { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return nil, err + } + + return client.GetBucketNotification(ctx, bucketName) +} + +// SetBucketNotification sets a new bucket notification on a bucket. +func (m *Manager) SetBucketNotification(ctx context.Context, args *common.SetBucketNotificationArguments) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return err + } + + return client.SetBucketNotification(ctx, bucketName, args.NotificationConfig) +} + +// RemoveAllBucketNotification removes all configured bucket notifications on a bucket. +func (m *Manager) RemoveAllBucketNotification(ctx context.Context, args *common.StorageBucketArguments) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return err + } + + return client.RemoveAllBucketNotification(ctx, bucketName) +} + +// MakeBucket creates a new bucket. +func (m *Manager) MakeBucket(ctx context.Context, args *common.MakeStorageBucketOptions) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Name) + if err != nil { + return err + } + + args.Name = bucketName + + return client.MakeBucket(ctx, args) +} + +// ListBuckets lists all buckets.
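+//
+// A minimal usage sketch (illustrative, not part of this patch; the client ID
+// value is an assumption about local configuration):
+//
+//	id := common.StorageClientID("minio")
+//	buckets, err := manager.ListBuckets(ctx, &common.ListStorageBucketArguments{ClientID: id})
+//	if err != nil {
+//		return err
+//	}
+//	log.Printf("found %d buckets", len(buckets))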
+func (m *Manager) ListBuckets(ctx context.Context, args *common.ListStorageBucketArguments) ([]common.StorageBucketInfo, error) { + client, ok := m.GetClient(&args.ClientID) + if !ok { + return nil, schema.InternalServerError("client not found", nil) + } + + return client.ListBuckets(ctx) +} + +// BucketExists checks if a bucket exists. +func (m *Manager) BucketExists(ctx context.Context, args *common.StorageBucketArguments) (bool, error) { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return false, err + } + + return client.BucketExists(ctx, bucketName) +} + +// RemoveBucket removes a bucket; the bucket must be empty to be successfully removed. +func (m *Manager) RemoveBucket(ctx context.Context, args *common.StorageBucketArguments) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return err + } + + return client.RemoveBucket(ctx, bucketName) +} + +// SetBucketTagging sets tags to a bucket. +func (m *Manager) SetBucketTagging(ctx context.Context, args *common.SetStorageBucketTaggingArguments) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return err + } + + args.Bucket = bucketName + + return client.SetBucketTagging(ctx, args) +} + +// GetBucketTagging gets tags of a bucket. +func (m *Manager) GetBucketTagging(ctx context.Context, args *common.StorageBucketArguments) (map[string]string, error) { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return nil, err + } + + return client.GetBucketTagging(ctx, bucketName) +} + +// RemoveBucketTagging removes all tags on a bucket. +func (m *Manager) RemoveBucketTagging(ctx context.Context, args *common.StorageBucketArguments) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return err + } + + return client.RemoveBucketTagging(ctx, bucketName) +} + +// SetBucketLifecycle sets lifecycle on bucket or an object prefix. +func (m *Manager) SetBucketLifecycle(ctx context.Context, args *common.SetStorageBucketLifecycleArguments) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return err + } + + return client.SetBucketLifecycle(ctx, bucketName, args.BucketLifecycleConfiguration) +} + +// GetBucketLifecycle gets lifecycle on a bucket or a prefix. +func (m *Manager) GetBucketLifecycle(ctx context.Context, args *common.StorageBucketArguments) (*common.BucketLifecycleConfiguration, error) { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return nil, err + } + + return client.GetBucketLifecycle(ctx, bucketName) +} + +// SetBucketEncryption sets default encryption configuration on a bucket. +func (m *Manager) SetBucketEncryption(ctx context.Context, args *common.SetStorageBucketEncryptionArguments) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return err + } + + return client.SetBucketEncryption(ctx, bucketName, args.ServerSideEncryptionConfiguration) +} + +// GetBucketEncryption gets default encryption configuration set on a bucket.
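+//
+// A usage sketch (illustrative; the bucket name is a placeholder):
+//
+//	cfg, err := manager.GetBucketEncryption(ctx, &common.StorageBucketArguments{Bucket: "my-bucket"})
+//	if err != nil {
+//		return err
+//	}
+//	_ = cfg // inspect the returned server-side encryption configuration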
+func (m *Manager) GetBucketEncryption(ctx context.Context, args *common.StorageBucketArguments) (*common.ServerSideEncryptionConfiguration, error) { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return nil, err + } + + return client.GetBucketEncryption(ctx, bucketName) +} + +// RemoveBucketEncryption removes default encryption configuration set on a bucket. +func (m *Manager) RemoveBucketEncryption(ctx context.Context, args *common.StorageBucketArguments) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return err + } + + return client.RemoveBucketEncryption(ctx, bucketName) +} + +// SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. +func (m *Manager) SetObjectLockConfig(ctx context.Context, args *common.SetStorageObjectLockArguments) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return err + } + + return client.SetObjectLockConfig(ctx, bucketName, args.SetStorageObjectLockConfig) +} + +// GetObjectLockConfig gets object lock configuration of given bucket. +func (m *Manager) GetObjectLockConfig(ctx context.Context, args *common.StorageBucketArguments) (*common.StorageObjectLockConfig, error) { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return nil, err + } + + return client.GetObjectLockConfig(ctx, bucketName) +} + +// EnableVersioning enables bucket versioning support. +func (m *Manager) EnableVersioning(ctx context.Context, args *common.StorageBucketArguments) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return err + } + + return client.EnableVersioning(ctx, bucketName) +} + +// SuspendVersioning disables bucket versioning support. +func (m *Manager) SuspendVersioning(ctx context.Context, args *common.StorageBucketArguments) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return err + } + + return client.SuspendVersioning(ctx, bucketName) +} + +// GetBucketVersioning gets versioning configuration set on a bucket. +func (m *Manager) GetBucketVersioning(ctx context.Context, args *common.StorageBucketArguments) (*common.StorageBucketVersioningConfiguration, error) { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return nil, err + } + + return client.GetBucketVersioning(ctx, bucketName) +} + +// SetBucketReplication sets replication configuration on a bucket. Role can be obtained by first defining the replication target on MinIO +// to associate the source and destination buckets for replication with the replication endpoint. +func (m *Manager) SetBucketReplication(ctx context.Context, args *common.SetStorageBucketReplicationArguments) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return err + } + + return client.SetBucketReplication(ctx, bucketName, args.StorageReplicationConfig) +} + +// GetBucketReplication gets current replication config on a bucket. 
+func (m *Manager) GetBucketReplication(ctx context.Context, args *common.StorageBucketArguments) (*common.StorageReplicationConfig, error) { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return nil, err + } + + return client.GetBucketReplication(ctx, bucketName) +} + +// RemoveBucketReplication removes replication configuration on a bucket. +func (m *Manager) RemoveBucketReplication(ctx context.Context, args *common.StorageBucketArguments) error { + client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket) + if err != nil { + return err + } + + return client.RemoveBucketReplication(ctx, bucketName) +} diff --git a/connector/storage/common/arguments.go b/connector/storage/common/arguments.go new file mode 100644 index 0000000..09fdbdf --- /dev/null +++ b/connector/storage/common/arguments.go @@ -0,0 +1,348 @@ +package common + +import ( + "time" + + "github.com/hasura/ndc-sdk-go/scalar" +) + +// ListStorageBucketArguments represent the input arguments for the ListBuckets methods. +type ListStorageBucketArguments struct { + // The storage client ID. + ClientID StorageClientID `json:"clientId,omitempty"` +} + +// StorageBucketArguments represent the common input arguments for bucket-related methods. +type StorageBucketArguments struct { + // The storage client ID. + ClientID *StorageClientID `json:"clientId,omitempty"` + // The bucket name. + Bucket string `json:"bucket,omitempty"` +} + +// CopyStorageObjectArguments represent input arguments of the CopyObject method. +type CopyStorageObjectArguments struct { + // The storage client ID + ClientID *StorageClientID `json:"clientId,omitempty"` + Dest StorageCopyDestOptions `json:"dest"` + Source StorageCopySrcOptions `json:"source"` +} + +// ComposeStorageObjectArguments represent input arguments of the ComposeObject method. +type ComposeStorageObjectArguments struct { + // The storage client ID + ClientID *StorageClientID `json:"clientId,omitempty"` + Dest StorageCopyDestOptions `json:"dest"` + Sources []StorageCopySrcOptions `json:"sources"` +} + +// MakeStorageBucketOptions holds all options to tweak bucket creation. +type MakeStorageBucketOptions struct { + // The storage client ID + ClientID *StorageClientID `json:"clientId,omitempty"` + // Bucket name + Name string `json:"name"` + // Bucket location + Region string `json:"region,omitempty"` + // Enable object locking + ObjectLocking bool `json:"objectLocking,omitempty"` +} + +// ListIncompleteUploadsArguments represent the input arguments of the ListIncompleteUploads method. +type ListIncompleteUploadsArguments struct { + StorageBucketArguments + + Prefix string `json:"prefix"` + Recursive bool `json:"recursive,omitempty"` +} + +// SetStorageBucketTaggingArguments represent the input arguments for the SetBucketTagging method. +type SetStorageBucketTaggingArguments struct { + StorageBucketArguments + + Tags map[string]string `json:"tags"` +} + +// RemoveIncompleteUploadArguments represent the input arguments for the RemoveIncompleteUpload method. +type RemoveIncompleteUploadArguments struct { + StorageBucketArguments + + Object string `json:"object"` +} + +// PresignedGetStorageObjectArguments represent the input arguments for the PresignedGetObject method.
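+//
+// A construction sketch (illustrative; the bucket and object names are
+// placeholders, and converting from time.Duration assumes scalar.Duration's
+// underlying representation):
+//
+//	expiry := scalar.Duration(15 * time.Minute)
+//	args := &PresignedGetStorageObjectArguments{
+//		StorageBucketArguments: StorageBucketArguments{Bucket: "my-bucket"},
+//		Object:                 "reports/2025-01.csv",
+//		Expiry:                 &expiry,
+//	}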
+type PresignedGetStorageObjectArguments struct { + StorageBucketArguments + + Object string `json:"object"` + Expiry *scalar.Duration `json:"expiry"` + RequestParams map[string][]string `json:"requestParams,omitempty"` +} + +// PresignedPutStorageObjectArguments represent the input arguments for the PresignedPutObject method. +type PresignedPutStorageObjectArguments struct { + StorageBucketArguments + + Object string `json:"object"` + Expiry *scalar.Duration `json:"expiry"` +} + +// ListStorageObjectsOptions holds all options of a list object request. +type ListStorageObjectsOptions struct { + StorageBucketArguments + + // Include object versions in the listing + WithVersions bool `json:"withVersions"` + // Include objects metadata in the listing + WithMetadata bool `json:"withMetadata"` + // Only list objects with the prefix + Prefix string `json:"prefix"` + // Ignore '/' delimiter + Recursive bool `json:"recursive"` + // The maximum number of objects requested per + // batch, advanced use-case not useful for most + // applications + MaxKeys int `json:"maxKeys"` + // StartAfter starts listing lexically at this object onwards. + StartAfter string `json:"startAfter"` +} + +// GetStorageObjectOptions are used to specify additional headers or options during GET requests. +type GetStorageObjectOptions struct { + StorageBucketArguments + + Object string `json:"object"` + Headers map[string]string `json:"headers,omitempty"` + RequestParams map[string][]string `json:"requestParams,omitempty"` + // ServerSideEncryption *ServerSideEncryptionMethod `json:"serverSideEncryption"` + VersionID *string `json:"versionId"` + PartNumber *int `json:"partNumber"` + + // Include any checksums, if object was uploaded with checksum. + // For multipart objects this is a checksum of part checksums. + // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + Checksum *bool `json:"checksum"` +} + +// StorageCopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs. +type StorageCopyDestOptions struct { + // points to destination bucket + Bucket string `json:"bucket,omitempty"` + // points to destination object + Object string `json:"object"` + + // `Encryption` is the key info for server-side-encryption with customer + // provided key. If it is nil, no encryption is performed. + // Encryption *ServerSideEncryptionMethod `json:"encryption"` + + // `userMeta` is the user-metadata key-value pairs to be set on the + // destination. The keys are automatically prefixed with `x-amz-meta-` + // if needed. If nil is passed, and if only a single source (of any + // size) is provided in the ComposeObject call, then metadata from the + // source is copied to the destination. + // if no user-metadata is provided, it is copied from source + // (when there is only one source object in the compose + // request) + UserMetadata map[string]string `json:"userMetadata,omitempty"` + // UserMetadata is only set on the destination if ReplaceMetadata is true; + // otherwise it is ignored and src.UserMetadata is preserved. + // NOTE: if you set ReplaceMetadata to true and no metadata is present + // in UserMetadata, your destination object will not have any metadata + // set. + ReplaceMetadata bool `json:"replaceMetadata,omitempty"` + + // `userTags` is the user defined object tags to be set on destination. + // This will be set only if the `replaceTags` field is set to true.
+ // Otherwise this field is ignored. + UserTags map[string]string `json:"userTags,omitempty"` + ReplaceTags bool `json:"replaceTags,omitempty"` + + // Specifies whether you want to apply a Legal Hold to the copied object. + LegalHold *StorageLegalHoldStatus `json:"legalHold"` + + // Object Retention related fields + Mode *StorageRetentionMode `json:"mode"` + RetainUntilDate *time.Time `json:"retainUntilDate"` + + // Needs to be specified if progress bar is specified. + Size int64 `json:"size,omitempty"` +} + +// StorageCopySrcOptions represents a source object to be copied, using server-side copying APIs. +type StorageCopySrcOptions struct { + // source bucket + Bucket string `json:"bucket,omitempty"` + // source object + Object string `json:"object"` + + VersionID string `json:"versionId,omitempty"` + MatchETag string `json:"matchETag,omitempty"` + NoMatchETag string `json:"noMatchETag,omitempty"` + MatchModifiedSince *time.Time `json:"matchModifiedSince"` + MatchUnmodifiedSince *time.Time `json:"matchUnmodifiedSince"` + MatchRange bool `json:"matchRange,omitempty"` + Start int64 `json:"start,omitempty"` + End int64 `json:"end,omitempty"` + // Encryption *ServerSideEncryptionMethod `json:"encryption"` +} + +// RemoveStorageObjectOptions represents options specified by user for RemoveObject call. +type RemoveStorageObjectOptions struct { + StorageBucketArguments + + Object string `json:"object"` + ForceDelete bool `json:"forceDelete,omitempty"` + GovernanceBypass bool `json:"governanceBypass,omitempty"` + VersionID string `json:"versionId,omitempty"` +} + +// PutStorageObjectRetentionOptions represents options specified by user for the PutObjectRetention call. +type PutStorageObjectRetentionOptions struct { + StorageBucketArguments + + Object string `json:"object"` + GovernanceBypass bool `json:"governanceBypass,omitempty"` + Mode *StorageRetentionMode `json:"mode"` + RetainUntilDate *time.Time `json:"retainUntilDate,omitempty"` + VersionID string `json:"versionId,omitempty"` +} + +// RemoveStorageObjectsOptions represents options specified by user for RemoveObjects call. +type RemoveStorageObjectsOptions struct { + ListStorageObjectsOptions + + GovernanceBypass bool `json:"governanceBypass,omitempty"` +} + +// PutStorageObjectLegalHoldOptions represents options specified by user for PutObjectLegalHold call. +type PutStorageObjectLegalHoldOptions struct { + StorageBucketArguments + + Object string `json:"object"` + VersionID string `json:"versionId,omitempty"` + Status *StorageLegalHoldStatus `json:"status"` +} + +// GetStorageObjectLegalHoldOptions represents options specified by user for GetObjectLegalHold call. +type GetStorageObjectLegalHoldOptions struct { + StorageBucketArguments + + Object string `json:"object"` + VersionID string `json:"versionId,omitempty"` +} + +// PutStorageObjectArguments represents input arguments of the PutObject method. +type PutStorageObjectArguments struct { + StorageBucketArguments + + Object string `json:"object"` + Options PutStorageObjectOptions `json:"options,omitempty"` +} + +// PutStorageObjectOptions represents options specified by user for PutObject call.
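+//
+// A construction sketch with illustrative values; most fields can be left at
+// their zero values:
+//
+//	opts := PutStorageObjectOptions{
+//		ContentType:    "text/csv",
+//		UserMetadata:   map[string]string{"source": "analytics"},
+//		UserTags:       map[string]string{"env": "dev"},
+//		SendContentMd5: true,
+//	}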
+type PutStorageObjectOptions struct { + UserMetadata map[string]string `json:"userMetadata,omitempty"` + UserTags map[string]string `json:"userTags,omitempty"` + ContentType string `json:"contentType,omitempty"` + ContentEncoding string `json:"contentEncoding,omitempty"` + ContentDisposition string `json:"contentDisposition,omitempty"` + ContentLanguage string `json:"contentLanguage,omitempty"` + CacheControl string `json:"cacheControl,omitempty"` + Expires *time.Time `json:"expires,omitempty"` + Mode *StorageRetentionMode `json:"mode,omitempty"` + RetainUntilDate *time.Time `json:"retainUntilDate,omitempty"` + // ServerSideEncryption *ServerSideEncryptionMethod `json:"serverSideEncryption,omitempty"` + NumThreads uint `json:"numThreads,omitempty"` + StorageClass string `json:"storageClass,omitempty"` + WebsiteRedirectLocation string `json:"websiteRedirectLocation,omitempty"` + PartSize uint64 `json:"partSize,omitempty"` + LegalHold *StorageLegalHoldStatus `json:"legalHold"` + SendContentMd5 bool `json:"sendContentMd5,omitempty"` + DisableContentSha256 bool `json:"disableContentSha256,omitempty"` + DisableMultipart bool `json:"disableMultipart,omitempty"` + + // AutoChecksum is the type of checksum that will be added if no other checksum is added, + // like MD5 or SHA256 streaming checksum, and it is feasible for the upload type. + // If none is specified CRC32C is used, since it is generally the fastest. + AutoChecksum *ChecksumType `json:"autoChecksum"` + + // Checksum will force a checksum of the specific type. + // This requires that the client was created with "TrailingHeaders:true" option, + // and that the destination server supports it. + // Unavailable with V2 signatures & Google endpoints. + // This will disable content MD5 checksums if set. + Checksum *ChecksumType `json:"checksum"` + + // ConcurrentStreamParts will create NumThreads buffers of PartSize bytes, + // fill them serially and upload them in parallel. + // This can be used for faster uploads on non-seekable or slow-to-seek input. + ConcurrentStreamParts bool `json:"concurrentStreamParts,omitempty"` +} + +// StorageObjectTaggingOptions holds an object version id to update tag(s) of a specific object version. +type StorageObjectTaggingOptions struct { + StorageBucketArguments + + Object string `json:"object"` + VersionID string `json:"versionId,omitempty"` +} + +// PutStorageObjectTaggingOptions holds an object version id to update tag(s) of a specific object version. +type PutStorageObjectTaggingOptions struct { + StorageBucketArguments + + Object string `json:"object"` + Tags map[string]string `json:"tags"` + VersionID string `json:"versionId,omitempty"` +} + +// StorageObjectAttributesOptions are options used for the GetObjectAttributes API. +type StorageObjectAttributesOptions struct { + StorageBucketArguments + + Object string `json:"object"` + // MaxParts How many parts the caller wants to be returned (default: 1000) + MaxParts int `json:"maxParts,omitempty"` + // VersionID The object version you want attributes for + VersionID string `json:"versionId,omitempty"` + // PartNumberMarker the listing will start AFTER the part matching PartNumberMarker + PartNumberMarker int `json:"partNumberMarker,omitempty"` + // ServerSideEncryption The server-side encryption algorithm used when storing this object in Minio + // ServerSideEncryption *ServerSideEncryptionMethod `json:"serverSideEncryption"` +} + +// SetBucketNotificationArguments represents input arguments for the SetBucketNotification method.
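+//
+// A construction sketch (illustrative) showing how the embedded structs
+// compose; the NotificationConfig fields are defined elsewhere in this package:
+//
+//	args := SetBucketNotificationArguments{
+//		StorageBucketArguments: StorageBucketArguments{Bucket: "my-bucket"},
+//		NotificationConfig:     NotificationConfig{ /* queue, topic and lambda configurations */ },
+//	}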
+type SetBucketNotificationArguments struct {
+	StorageBucketArguments
+	NotificationConfig
+}
+
+// SetStorageBucketLifecycleArguments represents input arguments for the SetBucketLifecycle method.
+type SetStorageBucketLifecycleArguments struct {
+	StorageBucketArguments
+	BucketLifecycleConfiguration
+}
+
+// SetStorageBucketEncryptionArguments represents input arguments for the SetBucketEncryption method.
+type SetStorageBucketEncryptionArguments struct {
+	StorageBucketArguments
+	ServerSideEncryptionConfiguration
+}
+
+// SetStorageObjectLockArguments represents input arguments for the SetStorageObjectLock method.
+type SetStorageObjectLockArguments struct {
+	StorageBucketArguments
+	SetStorageObjectLockConfig
+}
+
+// SetStorageBucketReplicationArguments represents input arguments for the SetBucketReplication method.
+type SetStorageBucketReplicationArguments struct {
+	StorageBucketArguments
+	StorageReplicationConfig
+}
+
+// PresignedURLResponse holds the presigned URL and expiry information.
+type PresignedURLResponse struct {
+	URL string `json:"url"`
+	ExpiredAt string `json:"expiredAt"`
+}
diff --git a/connector/storage/common/storage.go b/connector/storage/common/storage.go
new file mode 100644
index 0000000..d13c22f
--- /dev/null
+++ b/connector/storage/common/storage.go
@@ -0,0 +1,594 @@
+package common
+
+import (
+	"context"
+	"io"
+	"net/url"
+	"time"
+
+	"github.com/hasura/ndc-sdk-go/scalar"
+)
+
+// StorageClient abstracts required methods of the storage client.
+type StorageClient interface { //nolint:interfacebloat
+	// MakeBucket creates a new bucket.
+	MakeBucket(ctx context.Context, options *MakeStorageBucketOptions) error
+	// ListBuckets lists all buckets.
+	ListBuckets(ctx context.Context) ([]StorageBucketInfo, error)
+	// BucketExists checks if a bucket exists.
+	BucketExists(ctx context.Context, bucketName string) (bool, error)
+	// RemoveBucket removes a bucket; the bucket should be empty to be successfully removed.
+	RemoveBucket(ctx context.Context, bucketName string) error
+	// SetBucketTagging sets tags to a bucket.
+	SetBucketTagging(ctx context.Context, args *SetStorageBucketTaggingArguments) error
+	// GetBucketTagging gets tags of a bucket.
+	GetBucketTagging(ctx context.Context, bucketName string) (map[string]string, error)
+	// RemoveBucketTagging removes all tags on a bucket.
+	RemoveBucketTagging(ctx context.Context, bucketName string) error
+	// GetBucketPolicy gets access permissions on a bucket or a prefix.
+	GetBucketPolicy(ctx context.Context, bucketName string) (string, error)
+	// ListObjects lists objects in a bucket.
+	ListObjects(ctx context.Context, opts *ListStorageObjectsOptions) ([]StorageObject, error)
+	// ListIncompleteUploads lists partially uploaded objects in a bucket.
+	ListIncompleteUploads(ctx context.Context, args *ListIncompleteUploadsArguments) ([]StorageObjectMultipartInfo, error)
+	// GetObject returns a stream of the object data. Most of the common errors occur when reading the stream.
+	GetObject(ctx context.Context, opts *GetStorageObjectOptions) (io.ReadCloser, error)
+	// PutObject uploads objects that are less than 128MiB in a single PUT operation. For objects that are greater than 128MiB in size,
+	// PutObject seamlessly uploads the object as parts of 128MiB or more depending on the actual file size. The max upload size for an object is 5TB.
+	PutObject(ctx context.Context, args *PutStorageObjectArguments, reader io.Reader, objectSize int64) (*StorageUploadInfo, error)
+	// CopyObject creates or replaces an object through server-side copying of an existing object.
+	// It supports conditional copying, copying a part of an object and server-side encryption of destination and decryption of source.
+	// To copy multiple source objects into a single destination object see the ComposeObject API.
+	CopyObject(ctx context.Context, dest StorageCopyDestOptions, src StorageCopySrcOptions) (*StorageUploadInfo, error)
+	// ComposeObject creates an object by concatenating a list of source objects using server-side copying.
+	ComposeObject(ctx context.Context, dest StorageCopyDestOptions, srcs []StorageCopySrcOptions) (*StorageUploadInfo, error)
+	// StatObject fetches metadata of an object.
+	StatObject(ctx context.Context, opts *GetStorageObjectOptions) (*StorageObject, error)
+	// RemoveObject removes an object with some specified options.
+	RemoveObject(ctx context.Context, opts *RemoveStorageObjectOptions) error
+	// PutObjectRetention applies object retention lock onto an object.
+	PutObjectRetention(ctx context.Context, opts *PutStorageObjectRetentionOptions) error
+	// RemoveObjects removes the list of objects matched by the list options. The call sends delete requests to the server up to 1000 objects at a time.
+	// Per-object failures are collected in the returned slice.
+	RemoveObjects(ctx context.Context, opts *RemoveStorageObjectsOptions) []RemoveStorageObjectError
+	// PutObjectLegalHold applies legal-hold onto an object.
+	PutObjectLegalHold(ctx context.Context, opts *PutStorageObjectLegalHoldOptions) error
+	// GetObjectLegalHold returns legal-hold status on a given object.
+	GetObjectLegalHold(ctx context.Context, options *GetStorageObjectLegalHoldOptions) (StorageLegalHoldStatus, error)
+	// PutObjectTagging sets new object tags to the given object, replacing/overwriting any existing tags.
+	PutObjectTagging(ctx context.Context, options *PutStorageObjectTaggingOptions) error
+	// GetObjectTagging fetches object tags from the given object.
+	GetObjectTagging(ctx context.Context, options *StorageObjectTaggingOptions) (map[string]string, error)
+	// RemoveObjectTagging removes object tags from the given object.
+	RemoveObjectTagging(ctx context.Context, options *StorageObjectTaggingOptions) error
+	// GetObjectAttributes returns object metadata and attributes, including the list of parts of a multipart upload.
+	GetObjectAttributes(ctx context.Context, opts *StorageObjectAttributesOptions) (*StorageObjectAttributes, error)
+	// RemoveIncompleteUpload removes a partially uploaded object.
+	RemoveIncompleteUpload(ctx context.Context, args *RemoveIncompleteUploadArguments) error
+	// PresignedGetObject generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private.
+	// This presigned URL can have an associated expiration time in seconds after which it is no longer operational.
+	// The maximum expiry is 604800 seconds (i.e. 7 days) and minimum is 1 second.
+	PresignedGetObject(ctx context.Context, args *PresignedGetStorageObjectArguments) (*url.URL, error)
+	// PresignedPutObject generates a presigned URL for HTTP PUT operations.
+	// Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private.
+	// This presigned URL can have an associated expiration time in seconds after which it is no longer operational.
+	// The default expiry is set to 7 days.
+	PresignedPutObject(ctx context.Context, args *PresignedPutStorageObjectArguments) (*url.URL, error)
+	// PresignedHeadObject generates a presigned URL for HTTP HEAD operations.
+	// Browsers/Mobile clients may point to this URL to directly get metadata from objects even if the bucket is private.
+	// This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
+	PresignedHeadObject(ctx context.Context, args *PresignedGetStorageObjectArguments) (*url.URL, error)
+	// GetBucketNotification gets notification configuration on a bucket.
+	GetBucketNotification(ctx context.Context, bucketName string) (*NotificationConfig, error)
+	// SetBucketNotification sets a new bucket notification on a bucket.
+	SetBucketNotification(ctx context.Context, bucketName string, config NotificationConfig) error
+	// RemoveAllBucketNotification removes all configured bucket notifications on a bucket.
+	RemoveAllBucketNotification(ctx context.Context, bucketName string) error
+	// SetBucketLifecycle sets lifecycle on a bucket or an object prefix.
+	SetBucketLifecycle(ctx context.Context, bucketName string, config BucketLifecycleConfiguration) error
+	// GetBucketLifecycle gets lifecycle on a bucket or a prefix.
+	GetBucketLifecycle(ctx context.Context, bucketName string) (*BucketLifecycleConfiguration, error)
+	// SetBucketEncryption sets default encryption configuration on a bucket.
+	SetBucketEncryption(ctx context.Context, bucketName string, config ServerSideEncryptionConfiguration) error
+	// GetBucketEncryption gets default encryption configuration set on a bucket.
+	GetBucketEncryption(ctx context.Context, bucketName string) (*ServerSideEncryptionConfiguration, error)
+	// RemoveBucketEncryption removes default encryption configuration set on a bucket.
+	RemoveBucketEncryption(ctx context.Context, bucketName string) error
+	// SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil.
+	SetObjectLockConfig(ctx context.Context, bucketName string, config SetStorageObjectLockConfig) error
+	// GetObjectLockConfig gets object lock configuration of given bucket.
+	GetObjectLockConfig(ctx context.Context, bucketName string) (*StorageObjectLockConfig, error)
+	// EnableVersioning enables bucket versioning support.
+	EnableVersioning(ctx context.Context, bucketName string) error
+	// SuspendVersioning disables bucket versioning support.
+	SuspendVersioning(ctx context.Context, bucketName string) error
+	// GetBucketVersioning gets versioning configuration set on a bucket.
+	GetBucketVersioning(ctx context.Context, bucketName string) (*StorageBucketVersioningConfiguration, error)
+	// SetBucketReplication sets replication configuration on a bucket. Role can be obtained by first defining the replication target on MinIO
+	// to associate the source and destination buckets for replication with the replication endpoint.
+	SetBucketReplication(ctx context.Context, bucketName string, cfg StorageReplicationConfig) error
+	// GetBucketReplication gets current replication config on a bucket.
+	GetBucketReplication(ctx context.Context, bucketName string) (*StorageReplicationConfig, error)
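+	// Illustrative usage (editor's sketch): generating a temporary download
+	// link with PresignedGetObject. Bucket and object names are placeholders;
+	// when Expiry is omitted the default expiry applies.
+	//
+	//	args := &PresignedGetStorageObjectArguments{Object: "invoices/2024-01.pdf"}
+	//	args.Bucket = "my-bucket"
+	//	u, err := client.PresignedGetObject(ctx, args)
+	//	if err == nil {
+	//		fmt.Println(u.String()) // share this URL; it expires automatically
+	//	}
+
+	// RemoveBucketReplication removes replication configuration on a bucket.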
+	RemoveBucketReplication(ctx context.Context, bucketName string) error
+}
+
+// StorageBucketInfo container for bucket metadata.
+type StorageBucketInfo struct {
+	// The name of the bucket.
+	Name string `json:"name"`
+	// Date the bucket was created.
+	CreationDate time.Time `json:"creationDate"`
+}
+
+// StorageOwner contains the owner's display name and ID.
+type StorageOwner struct {
+	DisplayName *string `json:"name"`
+	ID *string `json:"id"`
+}
+
+// StorageGrantee represents the person being granted permissions.
+type StorageGrantee struct {
+	ID *string `json:"id"`
+	DisplayName *string `json:"displayName"`
+	URI *string `json:"uri"`
+}
+
+// StorageGrant holds grant information.
+type StorageGrant struct {
+	Grantee *StorageGrantee `json:"grantee"`
+	Permission *string `json:"permission"`
+}
+
+// StorageRestoreInfo contains information of the restore operation of an archived object.
+type StorageRestoreInfo struct {
+	// Whether the restore operation is still ongoing.
+	OngoingRestore bool `json:"ongoingRestore"`
+	// When the restored copy of the archived object will be removed.
+	ExpiryTime *time.Time `json:"expiryTime"`
+}
+
+// StorageObject container for object metadata.
+type StorageObject struct {
+	// An ETag is optionally set to md5sum of an object. In case of multipart objects,
+	// ETag is of the form MD5SUM-N where MD5SUM is the md5sum of all individual md5sums of
+	// each part concatenated into one string.
+	ETag string `json:"etag"`
+
+	Bucket string `json:"bucket"` // Name of the bucket
+	Name string `json:"name"` // Name of the object
+	LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
+	Size int64 `json:"size"` // Size in bytes of the object.
+	ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
+	Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached.
+
+	// Collection of additional metadata on the object.
+	// eg: x-amz-meta-*, content-encoding etc.
+	Metadata map[string][]string `json:"metadata,omitempty"`
+
+	// x-amz-meta-* headers with the "x-amz-meta-" prefix stripped, containing the first value.
+	// Only returned by MinIO servers.
+	UserMetadata map[string]string `json:"userMetadata,omitempty"`
+
+	// x-amz-tagging values in their key/value form.
+	// Only returned by MinIO servers.
+	UserTags map[string]string `json:"userTags,omitempty"`
+
+	// x-amz-tagging-count value
+	UserTagCount int `json:"userTagCount,omitempty"`
+
+	// Owner name.
+	Owner *StorageOwner `json:"owner"`
+
+	// ACL grant.
+	Grant []StorageGrant `json:"grant,omitempty"`
+
+	// The class of storage used to store the object.
+	StorageClass *string `json:"storageClass,omitempty"`
+
+	// Versioning related information
+	IsLatest *bool `json:"isLatest"`
+	IsDeleteMarker *bool `json:"isDeleteMarker"`
+	VersionID *string `json:"versionId,omitempty"`
+
+	// x-amz-replication-status value is in one of the following states
+	ReplicationStatus *StorageObjectReplicationStatus `json:"replicationStatus"`
+	// set to true if the delete marker has a backing object version on the target, and is eligible to replicate
+	ReplicationReady *bool `json:"replicationReady"`
+	// Lifecycle expiry-date and ruleID associated with the expiry
+	// not to be confused with `Expires` HTTP header.
+	Expiration *time.Time `json:"expiration"`
+	ExpirationRuleID *string `json:"expirationRuleId"`
+
+	Restore *StorageRestoreInfo `json:"restore"`
+
+	// Checksum values
+	StorageObjectChecksum
+}
+
+// StorageObjectReplicationStatus represents the x-amz-replication-status value enum.
+// @enum COMPLETED,PENDING,FAILED,REPLICA
+type StorageObjectReplicationStatus string
+
+// StorageObjectChecksum represents checksum values of the object.
+type StorageObjectChecksum struct {
+	ChecksumCRC32 *string `json:"checksumCrc32,omitempty"`
+	ChecksumCRC32C *string `json:"checksumCrc32C,omitempty"`
+	ChecksumSHA1 *string `json:"checksumSha1,omitempty"`
+	ChecksumSHA256 *string `json:"checksumSha256,omitempty"`
+	ChecksumCRC64NVME *string `json:"checksumCrc64Nvme,omitempty"`
+}
+
+// StorageUploadInfo represents the information of the uploaded object.
+type StorageUploadInfo struct {
+	// An ETag is optionally set to md5sum of an object. In case of multipart objects,
+	// ETag is of the form MD5SUM-N where MD5SUM is the md5sum of all individual md5sums of
+	// each part concatenated into one string.
+	ETag string `json:"etag"`
+
+	Bucket string `json:"bucket"` // Name of the bucket
+	Name string `json:"name"` // Name of the object
+	LastModified *time.Time `json:"lastModified"` // Date and time the object was last modified.
+	Size int64 `json:"size"` // Size in bytes of the object.
+	Location *string `json:"location"`
+	VersionID *string `json:"versionId"`
+
+	// Lifecycle expiry-date and ruleID associated with the expiry
+	// not to be confused with `Expires` HTTP header.
+	Expiration *time.Time `json:"expiration"`
+	ExpirationRuleID *string `json:"expirationRuleId"`
+
+	// Checksum values
+	StorageObjectChecksum
+}
+
+// StorageObjectMultipartInfo container for multipart object metadata.
+type StorageObjectMultipartInfo struct {
+	// Date and time at which the multipart upload was initiated.
+	Initiated *time.Time `json:"initiated"`
+
+	// The type of storage to use for the object. Defaults to 'STANDARD'.
+	StorageClass string `json:"storageClass,omitempty"`
+
+	// Key of the object for which the multipart upload was initiated.
+	Key string `json:"key,omitempty"`
+
+	// Size in bytes of the object.
+	Size int64 `json:"size,omitempty"`
+
+	// Upload ID that identifies the multipart upload.
+	UploadID string `json:"uploadId,omitempty"`
+}
+
+// ServerSideEncryptionMethod represents a server-side-encryption method enum.
+// @enum SSE_C,KMS,S3
+// type ServerSideEncryptionMethod string
+
+// StorageRetentionMode the object retention mode.
+// @enum GOVERNANCE,COMPLIANCE
+type StorageRetentionMode string
+
+// StorageLegalHoldStatus the object legal hold status.
+// @enum ON,OFF
+type StorageLegalHoldStatus string
+
+// RemoveStorageObjectError the container of the Multi Delete S3 API error.
+type RemoveStorageObjectError struct {
+	ObjectName string `json:"objectName"`
+	VersionID string `json:"versionId"`
+	Error error `json:"error"`
+}
+
+// ChecksumType contains information about the checksum type.
+// @enum SHA256,SHA1,CRC32,CRC32C,CRC64NVME,FullObjectCRC32,FullObjectCRC32C,None
+type ChecksumType string
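+
+// Illustrative usage (editor's sketch): bulk deletion reports per-object
+// failures as RemoveStorageObjectError values instead of a single error.
+// The options value is a placeholder.
+//
+//	for _, e := range client.RemoveObjects(ctx, opts) {
+//		log.Printf("failed to remove %s (version %q): %v", e.ObjectName, e.VersionID, e.Error)
+//	}
+
+// StorageObjectAttributes is the response object returned by the GetObjectAttributes API.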
+type StorageObjectAttributes struct {
+	// The object version
+	VersionID *string `json:"versionId"`
+	// The last time the object was modified
+	LastModified time.Time `json:"lastModified"`
+	// Contains more information about the object
+	StorageObjectAttributesResponse
+}
+
+// StorageObjectParts contains the parts information of an object returned by the GetObjectAttributes API.
+type StorageObjectParts struct {
+	// Contains the total part count for the object (not the current response)
+	PartsCount int `json:"partsCount"`
+	// Pagination of parts will begin at (but not include) PartNumberMarker
+	PartNumberMarker int `json:"partNumberMarker"`
+	// The next PartNumberMarker to be used in order to continue pagination
+	NextPartNumberMarker int `json:"nextPartNumberMarker"`
+	// Reflects the MaxParts used by the caller or the default MaxParts value of the API
+	MaxParts int `json:"maxParts"`
+	// Indicates if the last part is included in the request (does not check if parts are missing from the start of the list, ONLY the end)
+	IsTruncated bool `json:"isTruncated"`
+	Parts []*StorageObjectAttributePart `json:"parts"`
+}
+
+// StorageObjectAttributePart is used by StorageObjectAttributesResponse to describe an object part.
+type StorageObjectAttributePart struct {
+	StorageObjectChecksum
+	PartNumber int `json:"partNumber"`
+	Size int `json:"size"`
+}
+
+// StorageObjectAttributesResponse contains details returned by the GetObjectAttributes API.
+type StorageObjectAttributesResponse struct {
+	ETag string `json:"etag,omitempty"`
+	StorageClass string `json:"storageClass"`
+	ObjectSize int `json:"objectSize"`
+	Checksum StorageObjectChecksum `json:"checksum"`
+	ObjectParts StorageObjectParts `json:"objectParts"`
+}
+
+// NotificationCommonConfig - represents one single notification configuration
+// such as topic, queue or lambda configuration.
+type NotificationCommonConfig struct {
+	ID *string `json:"id,omitempty"`
+	Arn *string `json:"arn"`
+	Events []string `json:"event"`
+	Filter *NotificationFilter `json:"filter,omitempty"`
+}
+
+// NotificationTopicConfig carries one single topic notification configuration.
+type NotificationTopicConfig struct {
+	NotificationCommonConfig
+	Topic string `json:"topic"`
+}
+
+// NotificationQueueConfig carries one single queue notification configuration.
+type NotificationQueueConfig struct {
+	NotificationCommonConfig
+	Queue string `json:"queue"`
+}
+
+// NotificationLambdaConfig carries one single cloudfunction notification configuration.
+type NotificationLambdaConfig struct {
+	NotificationCommonConfig
+	Lambda string `json:"cloudFunction"`
+}
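+
+// Illustrative usage (editor's sketch): walking every part of a multipart
+// object by following NextPartNumberMarker until IsTruncated is false.
+// Bucket and object names are placeholders.
+//
+//	opts := &StorageObjectAttributesOptions{Object: "backups/db.tar", MaxParts: 100}
+//	for {
+//		attrs, err := client.GetObjectAttributes(ctx, opts)
+//		if err != nil {
+//			break
+//		}
+//		// ... consume attrs.ObjectParts.Parts ...
+//		if !attrs.ObjectParts.IsTruncated {
+//			break
+//		}
+//		opts.PartNumberMarker = attrs.ObjectParts.NextPartNumberMarker
+//	}
+
+// NotificationConfig the struct that represents a notification configuration object.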
+type NotificationConfig struct {
+	LambdaConfigs []NotificationLambdaConfig `json:"cloudFunctionConfigurations"`
+	TopicConfigs []NotificationTopicConfig `json:"topicConfigurations"`
+	QueueConfigs []NotificationQueueConfig `json:"queueConfigurations"`
+}
+
+// NotificationFilter - a tag in the notification XML structure which carries suffix/prefix filters.
+type NotificationFilter struct {
+	S3Key *NotificationS3Key `json:"s3Key,omitempty"`
+}
+
+// NotificationFilterRule child of S3Key, a tag in the notification XML which
+// carries suffix/prefix filters.
+type NotificationFilterRule struct {
+	Name string `json:"name"`
+	Value string `json:"value"`
+}
+
+// NotificationS3Key child of Filter, a tag in the notification XML which
+// carries suffix/prefix filters.
+type NotificationS3Key struct {
+	FilterRules []NotificationFilterRule `json:"filterRule,omitempty"`
+}
+
+// BucketLifecycleRule represents a single rule in lifecycle configuration.
+type BucketLifecycleRule struct {
+	AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"abortIncompleteMultipartUpload"`
+	Expiration *LifecycleExpiration `json:"expiration,omitempty"`
+	DelMarkerExpiration *LifecycleDelMarkerExpiration `json:"delMarkerExpiration,omitempty"`
+	AllVersionsExpiration *LifecycleAllVersionsExpiration `json:"allVersionsExpiration,omitempty"`
+	ID string `json:"id"`
+	RuleFilter *LifecycleFilter `json:"filter,omitempty"`
+	NoncurrentVersionExpiration *LifecycleNoncurrentVersionExpiration `json:"noncurrentVersionExpiration,omitempty"`
+	NoncurrentVersionTransition *LifecycleNoncurrentVersionTransition `json:"noncurrentVersionTransition,omitempty"`
+	Prefix *string `json:"prefix,omitempty"`
+	Status *string `json:"status"`
+	Transition *LifecycleTransition `json:"transition,omitempty"`
+}
+
+// BucketLifecycleConfiguration is a collection of lifecycle Rule objects.
+type BucketLifecycleConfiguration struct {
+	Rules []BucketLifecycleRule `json:"rules"`
+}
+
+// AbortIncompleteMultipartUpload structure, not supported yet on MinIO.
+type AbortIncompleteMultipartUpload struct {
+	DaysAfterInitiation *int `json:"daysAfterInitiation"`
+}
+
+// LifecycleExpiration expiration details of lifecycle configuration.
+type LifecycleExpiration struct {
+	Date *scalar.Date `json:"date,omitempty"`
+	Days *int `json:"days,omitempty"`
+	DeleteMarker *bool `json:"expiredObjectDeleteMarker,omitempty"`
+	DeleteAll *bool `json:"expiredObjectAllVersions,omitempty"`
+}
+
+// IsEmpty checks if all properties of the object are empty.
+func (fe LifecycleExpiration) IsEmpty() bool {
+	return fe.DeleteAll == nil && fe.Date == nil && fe.Days == nil && fe.DeleteMarker == nil
+}
+
+// LifecycleTransition transition details of lifecycle configuration.
+type LifecycleTransition struct {
+	Date *scalar.Date `json:"date"`
+	StorageClass *string `json:"storageClass"`
+	Days *int `json:"days"`
+}
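+
+// Illustrative usage (editor's sketch): a lifecycle configuration that
+// expires objects under the "tmp/" prefix after 7 days.
+//
+//	days := 7
+//	prefix := "tmp/"
+//	status := "Enabled"
+//	lifecycle := BucketLifecycleConfiguration{
+//		Rules: []BucketLifecycleRule{{
+//			ID:         "expire-tmp",
+//			Status:     &status,
+//			RuleFilter: &LifecycleFilter{Prefix: &prefix},
+//			Expiration: &LifecycleExpiration{Days: &days},
+//		}},
+//	}
+
+// IsEmpty checks if all properties of the object are empty.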
+func (fe LifecycleTransition) IsEmpty() bool {
+	return fe.StorageClass == nil && fe.Date == nil && fe.Days == nil
+}
+
+// LifecycleDelMarkerExpiration represents DelMarkerExpiration actions element in an ILM policy.
+type LifecycleDelMarkerExpiration struct {
+	Days *int `json:"days"`
+}
+
+// LifecycleAllVersionsExpiration represents AllVersionsExpiration actions element in an ILM policy.
+type LifecycleAllVersionsExpiration struct {
+	Days *int `json:"days"`
+	DeleteMarker *bool `json:"deleteMarker"`
+}
+
+// LifecycleFilter will be used in selecting rule(s) for lifecycle configuration.
+type LifecycleFilter struct {
+	And *LifecycleFilterAnd `json:"and,omitempty"`
+	Prefix *string `json:"prefix,omitempty"`
+	Tag *StorageTag `json:"tag,omitempty"`
+	ObjectSizeLessThan *int64 `json:"objectSizeLessThan,omitempty"`
+	ObjectSizeGreaterThan *int64 `json:"objectSizeGreaterThan,omitempty"`
+}
+
+// LifecycleFilterAnd the And Rule for LifecycleTag, to be used in LifecycleRuleFilter.
+type LifecycleFilterAnd struct {
+	Prefix *string `json:"prefix,omitempty"`
+	Tags []StorageTag `json:"tags,omitempty"`
+	ObjectSizeLessThan *int64 `json:"objectSizeLessThan,omitempty"`
+	ObjectSizeGreaterThan *int64 `json:"objectSizeGreaterThan,omitempty"`
+}
+
+// StorageTag a key/value pair representing an object tag in a configuration rule.
+type StorageTag struct {
+	Key *string `json:"key,omitempty"`
+	Value *string `json:"value,omitempty"`
+}
+
+// LifecycleNoncurrentVersionExpiration - Specifies when noncurrent object versions expire.
+// Upon expiration, the server permanently deletes the noncurrent object versions.
+// Set this lifecycle configuration action on a bucket that has versioning enabled
+// (or suspended) to request the server to delete noncurrent object versions at a
+// specific period in the object's lifetime.
+type LifecycleNoncurrentVersionExpiration struct {
+	NoncurrentDays *int `json:"noncurrentDays,omitempty"`
+	NewerNoncurrentVersions *int `json:"newerNoncurrentVersions,omitempty"`
+}
+
+// LifecycleNoncurrentVersionTransition sets this action to request the server to
+// transition noncurrent object versions to a different storage class
+// at a specific period in the object's lifetime.
+type LifecycleNoncurrentVersionTransition struct {
+	StorageClass *string `json:"storageClass,omitempty"`
+	NoncurrentDays *int `json:"noncurrentDays"`
+	NewerNoncurrentVersions *int `json:"newerNoncurrentVersions,omitempty"`
+}
+
+// StorageApplySSEByDefault defines default encryption configuration, KMS or SSE. To activate
+// KMS, SSEAlgorithm needs to be set to `aws:kms`.
+// MinIO currently does not support KMS.
+type StorageApplySSEByDefault struct {
+	KmsMasterKeyID *string `json:"kmsMasterKeyId,omitempty"`
+	SSEAlgorithm string `json:"sseAlgorithm"`
+}
+
+// ServerSideEncryptionRule rule layer encapsulates default encryption configuration.
+type ServerSideEncryptionRule struct {
+	Apply StorageApplySSEByDefault `json:"apply"`
+}
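+
+// Illustrative usage (editor's sketch): a default-encryption configuration
+// that applies SSE-KMS with a placeholder key ID.
+//
+//	keyID := "alias/my-master-key"
+//	sseConfig := ServerSideEncryptionConfiguration{
+//		Rules: []ServerSideEncryptionRule{{
+//			Apply: StorageApplySSEByDefault{
+//				SSEAlgorithm:   "aws:kms",
+//				KmsMasterKeyID: &keyID,
+//			},
+//		}},
+//	}
+
+// ServerSideEncryptionConfiguration is the default encryption configuration structure.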
+type ServerSideEncryptionConfiguration struct {
+	Rules []ServerSideEncryptionRule `json:"rules"`
+}
+
+// SetStorageObjectLockConfig represents the object lock configuration options in given bucket.
+type SetStorageObjectLockConfig struct {
+	Mode *StorageRetentionMode `json:"mode"`
+	Validity *uint `json:"validity"`
+	Unit *StorageRetentionValidityUnit `json:"unit"`
+}
+
+// StorageObjectLockConfig represents the object lock configuration in given bucket.
+type StorageObjectLockConfig struct {
+	SetStorageObjectLockConfig
+
+	ObjectLock string `json:"objectLock"`
+}
+
+// StorageRetentionValidityUnit retention validity unit.
+// @enum DAYS,YEARS
+type StorageRetentionValidityUnit string
+
+// StorageBucketVersioningConfiguration is the versioning configuration structure.
+type StorageBucketVersioningConfiguration struct {
+	Status *string `json:"status"`
+	MFADelete *string `json:"mfaDelete"`
+	// MinIO extension - allows selective, prefix-level versioning exclusion.
+	// Requires versioning to be enabled.
+	ExcludedPrefixes []string `json:"excludedPrefixes,omitempty"`
+	ExcludeFolders *bool `json:"excludeFolders"`
+}
+
+// StorageReplicationConfig replication configuration specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
+type StorageReplicationConfig struct {
+	Rules []StorageReplicationRule `json:"rules"`
+	Role *string `json:"role"`
+}
+
+// StorageReplicationRule a rule for replication configuration.
+type StorageReplicationRule struct {
+	ID *string `json:"id,omitempty"`
+	Status StorageReplicationRuleStatus `json:"status"`
+	Priority int `json:"priority"`
+	DeleteMarkerReplication *DeleteMarkerReplication `json:"deleteMarkerReplication"`
+	DeleteReplication *DeleteReplication `json:"deleteReplication"`
+	Destination *StorageReplicationDestination `json:"destination"`
+	Filter StorageReplicationFilter `json:"filter"`
+	SourceSelectionCriteria *SourceSelectionCriteria `json:"sourceSelectionCriteria"`
+	ExistingObjectReplication *ExistingObjectReplication `json:"existingObjectReplication,omitempty"`
+}
+
+// StorageReplicationDestination the destination in ReplicationConfiguration.
+type StorageReplicationDestination struct {
+	Bucket string `json:"bucket"`
+	StorageClass *string `json:"storageClass,omitempty"`
+}
+
+// ExistingObjectReplication whether existing object replication is enabled.
+type ExistingObjectReplication struct {
+	Status StorageReplicationRuleStatus `json:"status"` // should be set to "Disabled" by default
+}
+
+// StorageReplicationRuleStatus represents Enabled/Disabled status.
+// @enum Enabled,Disabled
+type StorageReplicationRuleStatus string
+
+// DeleteMarkerReplication whether delete markers are replicated -
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
+type DeleteMarkerReplication struct {
+	Status StorageReplicationRuleStatus `json:"status"` // should be set to "Disabled" by default
+}
+
+// DeleteReplication whether versioned deletes are replicated. This is a MinIO specific extension.
+type DeleteReplication struct {
+	Status StorageReplicationRuleStatus `json:"status"` // should be set to "Disabled" by default
+}
+
+// ReplicaModifications specifies if replica modification sync is enabled.
+type ReplicaModifications struct {
+	Status StorageReplicationRuleStatus `json:"status"` // should be set to "Enabled" by default
+}
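+
+// Illustrative usage (editor's sketch): a single replication rule that
+// mirrors objects under "logs/" to a destination bucket ARN (placeholder).
+//
+//	logsPrefix := "logs/"
+//	replication := StorageReplicationConfig{
+//		Rules: []StorageReplicationRule{{
+//			Status:   "Enabled",
+//			Priority: 1,
+//			Filter:   StorageReplicationFilter{Prefix: &logsPrefix},
+//			Destination: &StorageReplicationDestination{
+//				Bucket: "arn:aws:s3:::replica-bucket",
+//			},
+//		}},
+//	}
+
+// SourceSelectionCriteria specifies additional source selection criteria in ReplicationConfiguration.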
+type SourceSelectionCriteria struct {
+	ReplicaModifications *ReplicaModifications `json:"replicaModifications"`
+}
+
+// StorageReplicationFilter a filter for a replication configuration Rule.
+type StorageReplicationFilter struct {
+	Prefix *string `json:"prefix,omitempty"`
+	And *StorageReplicationFilterAnd `json:"and,omitempty"`
+	Tag *StorageTag `json:"tag,omitempty"`
+}
+
+// StorageReplicationFilterAnd - a tag to combine a prefix and multiple tags for replication configuration rule.
+type StorageReplicationFilterAnd struct {
+	Prefix *string `json:"prefix,omitempty"`
+	Tags []StorageTag `json:"tag,omitempty"`
+}
diff --git a/connector/storage/common/telemetry.go b/connector/storage/common/telemetry.go
new file mode 100644
index 0000000..6e6870b
--- /dev/null
+++ b/connector/storage/common/telemetry.go
@@ -0,0 +1,100 @@
+package common
+
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// NewDBSystemAttribute creates the default db.system attribute.
+func NewDBSystemAttribute() attribute.KeyValue {
+	return attribute.String("db.system", "storage")
+}
+
+// SetObjectChecksumSpanAttributes sets span attributes from the object checksum.
+func SetObjectChecksumSpanAttributes(span trace.Span, object *StorageObjectChecksum) {
+	if object.ChecksumCRC32 != nil {
+		span.SetAttributes(attribute.String("storage.object.checksum_crc32", *object.ChecksumCRC32))
+	}
+
+	if object.ChecksumCRC32C != nil {
+		span.SetAttributes(attribute.String("storage.object.checksum_crc32c", *object.ChecksumCRC32C))
+	}
+
+	if object.ChecksumCRC64NVME != nil {
+		span.SetAttributes(attribute.String("storage.object.checksum_crc64nvme", *object.ChecksumCRC64NVME))
+	}
+
+	if object.ChecksumSHA1 != nil {
+		span.SetAttributes(attribute.String("storage.object.checksum_sha1", *object.ChecksumSHA1))
+	}
+
+	if object.ChecksumSHA256 != nil {
+		span.SetAttributes(attribute.String("storage.object.checksum_sha256", *object.ChecksumSHA256))
+	}
+}
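+
+// Illustrative usage (editor's sketch): recording the default system
+// attribute plus checksum attributes on the current span after an upload.
+// The info value is a placeholder result from PutObject.
+//
+//	span := trace.SpanFromContext(ctx)
+//	span.SetAttributes(NewDBSystemAttribute())
+//	SetObjectChecksumSpanAttributes(span, &info.StorageObjectChecksum)
+
+// SetObjectInfoSpanAttributes sets span attributes from the object info.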
+func SetObjectInfoSpanAttributes(span trace.Span, object *StorageObject) { + span.SetAttributes(attribute.Int64("storage.object.size", object.Size)) + SetObjectChecksumSpanAttributes(span, &object.StorageObjectChecksum) + + if object.ETag != "" { + span.SetAttributes(attribute.String("storage.object.etag", object.ETag)) + } + + if object.StorageClass != nil { + span.SetAttributes(attribute.String("storage.object.storage_class", *object.StorageClass)) + } + + if object.VersionID != nil { + span.SetAttributes(attribute.String("storage.object.version", *object.VersionID)) + } + + if object.UserTagCount > 0 { + span.SetAttributes(attribute.Int("storage.object.user_tag_count", object.UserTagCount)) + } + + if len(object.Metadata) > 0 { + span.SetAttributes(attribute.Int("storage.object.metadata_count", len(object.Metadata))) + } + + if len(object.UserMetadata) > 0 { + span.SetAttributes(attribute.Int("storage.object.user_metadata_count", len(object.UserMetadata))) + } + + if !object.Expires.IsZero() { + span.SetAttributes(attribute.String("storage.object.expires", object.Expires.Format(time.RFC3339))) + } + + if object.Expiration != nil && !object.Expiration.IsZero() { + span.SetAttributes(attribute.String("storage.object.expiration", object.Expiration.Format(time.RFC3339))) + } + + if object.ExpirationRuleID != nil { + span.SetAttributes(attribute.String("storage.object.expiration_rule_id", *object.ExpirationRuleID)) + } +} + +// SetUploadInfoAttributes sets span attributes from the upload info. +func SetUploadInfoAttributes(span trace.Span, object *StorageUploadInfo) { + span.SetAttributes(attribute.Int64("storage.object.size", object.Size)) + SetObjectChecksumSpanAttributes(span, &object.StorageObjectChecksum) + + if object.ETag != "" { + span.SetAttributes(attribute.String("storage.object.etag", object.ETag)) + } + + if object.VersionID != nil { + span.SetAttributes(attribute.String("storage.object.version", *object.VersionID)) + } + + if object.Expiration != nil && !object.Expiration.IsZero() { + span.SetAttributes(attribute.String("storage.object.expiration", object.Expiration.Format(time.RFC3339))) + } + + if object.ExpirationRuleID != nil { + span.SetAttributes(attribute.String("storage.object.expiration_rule_id", *object.ExpirationRuleID)) + } +} diff --git a/connector/storage/common/types.generated.go b/connector/storage/common/types.generated.go new file mode 100644 index 0000000..d26d98b --- /dev/null +++ b/connector/storage/common/types.generated.go @@ -0,0 +1,1279 @@ +// Code generated by github.com/hasura/ndc-sdk-go/cmd/hasura-ndc-go, DO NOT EDIT. 
+package common + +import ( + "encoding/json" + "errors" + "github.com/hasura/ndc-sdk-go/scalar" + "github.com/hasura/ndc-sdk-go/utils" + "slices" +) + +// FromValue decodes values from map +func (j *GetStorageObjectLegalHoldOptions) FromValue(input map[string]any) error { + var err error + j.StorageBucketArguments, err = utils.DecodeObject[StorageBucketArguments](input) + if err != nil { + return err + } + j.Object, err = utils.GetString(input, "object") + if err != nil { + return err + } + j.VersionID, err = utils.GetStringDefault(input, "versionId") + if err != nil { + return err + } + return nil +} + +// FromValue decodes values from map +func (j *GetStorageObjectOptions) FromValue(input map[string]any) error { + var err error + j.StorageBucketArguments, err = utils.DecodeObject[StorageBucketArguments](input) + if err != nil { + return err + } + j.Checksum, err = utils.GetNullableBoolean(input, "checksum") + if err != nil { + return err + } + j.Headers, err = utils.DecodeObjectValueDefault[map[string]string](input, "headers") + if err != nil { + return err + } + j.Object, err = utils.GetString(input, "object") + if err != nil { + return err + } + j.PartNumber, err = utils.GetNullableInt[int](input, "partNumber") + if err != nil { + return err + } + j.RequestParams, err = utils.DecodeObjectValueDefault[map[string][]string](input, "requestParams") + if err != nil { + return err + } + j.VersionID, err = utils.GetNullableString(input, "versionId") + if err != nil { + return err + } + return nil +} + +// FromValue decodes values from map +func (j *ListIncompleteUploadsArguments) FromValue(input map[string]any) error { + var err error + j.StorageBucketArguments, err = utils.DecodeObject[StorageBucketArguments](input) + if err != nil { + return err + } + j.Prefix, err = utils.GetString(input, "prefix") + if err != nil { + return err + } + j.Recursive, err = utils.GetBooleanDefault(input, "recursive") + if err != nil { + return err + } + return nil +} + +// FromValue decodes values from map +func (j *ListStorageBucketArguments) FromValue(input map[string]any) error { + var err error + j.ClientID, err = utils.DecodeObjectValueDefault[StorageClientID](input, "clientId") + if err != nil { + return err + } + return nil +} + +// FromValue decodes values from map +func (j *PresignedGetStorageObjectArguments) FromValue(input map[string]any) error { + var err error + j.StorageBucketArguments, err = utils.DecodeObject[StorageBucketArguments](input) + if err != nil { + return err + } + j.Expiry, err = utils.DecodeNullableObjectValue[scalar.Duration](input, "expiry") + if err != nil { + return err + } + j.Object, err = utils.GetString(input, "object") + if err != nil { + return err + } + j.RequestParams, err = utils.DecodeObjectValueDefault[map[string][]string](input, "requestParams") + if err != nil { + return err + } + return nil +} + +// FromValue decodes values from map +func (j *PresignedPutStorageObjectArguments) FromValue(input map[string]any) error { + var err error + j.StorageBucketArguments, err = utils.DecodeObject[StorageBucketArguments](input) + if err != nil { + return err + } + j.Expiry, err = utils.DecodeNullableObjectValue[scalar.Duration](input, "expiry") + if err != nil { + return err + } + j.Object, err = utils.GetString(input, "object") + if err != nil { + return err + } + return nil +} + +// FromValue decodes values from map +func (j *StorageBucketArguments) FromValue(input map[string]any) error { + var err error + j.Bucket, err = utils.GetStringDefault(input, "bucket") + if err != 
nil { + return err + } + j.ClientID, err = utils.DecodeNullableObjectValue[StorageClientID](input, "clientId") + if err != nil { + return err + } + return nil +} + +// FromValue decodes values from map +func (j *StorageObjectAttributesOptions) FromValue(input map[string]any) error { + var err error + j.StorageBucketArguments, err = utils.DecodeObject[StorageBucketArguments](input) + if err != nil { + return err + } + j.MaxParts, err = utils.GetIntDefault[int](input, "maxParts") + if err != nil { + return err + } + j.Object, err = utils.GetString(input, "object") + if err != nil { + return err + } + j.PartNumberMarker, err = utils.GetIntDefault[int](input, "partNumberMarker") + if err != nil { + return err + } + j.VersionID, err = utils.GetStringDefault(input, "versionId") + if err != nil { + return err + } + return nil +} + +// FromValue decodes values from map +func (j *StorageObjectTaggingOptions) FromValue(input map[string]any) error { + var err error + j.StorageBucketArguments, err = utils.DecodeObject[StorageBucketArguments](input) + if err != nil { + return err + } + j.Object, err = utils.GetString(input, "object") + if err != nil { + return err + } + j.VersionID, err = utils.GetStringDefault(input, "versionId") + if err != nil { + return err + } + return nil +} + +// ToMap encodes the struct to a value map +func (j AbortIncompleteMultipartUpload) ToMap() map[string]any { + r := make(map[string]any) + r["daysAfterInitiation"] = j.DaysAfterInitiation + + return r +} + +// ToMap encodes the struct to a value map +func (j BucketLifecycleConfiguration) ToMap() map[string]any { + r := make(map[string]any) + j_Rules := make([]any, len(j.Rules)) + for i, j_Rules_v := range j.Rules { + j_Rules[i] = j_Rules_v + } + r["rules"] = j_Rules + + return r +} + +// ToMap encodes the struct to a value map +func (j BucketLifecycleRule) ToMap() map[string]any { + r := make(map[string]any) + if j.AbortIncompleteMultipartUpload != nil { + r["abortIncompleteMultipartUpload"] = (*j.AbortIncompleteMultipartUpload) + } + if j.AllVersionsExpiration != nil { + r["allVersionsExpiration"] = (*j.AllVersionsExpiration) + } + if j.DelMarkerExpiration != nil { + r["delMarkerExpiration"] = (*j.DelMarkerExpiration) + } + if j.Expiration != nil { + r["expiration"] = (*j.Expiration) + } + if j.RuleFilter != nil { + r["filter"] = (*j.RuleFilter) + } + r["id"] = j.ID + if j.NoncurrentVersionExpiration != nil { + r["noncurrentVersionExpiration"] = (*j.NoncurrentVersionExpiration) + } + if j.NoncurrentVersionTransition != nil { + r["noncurrentVersionTransition"] = (*j.NoncurrentVersionTransition) + } + r["prefix"] = j.Prefix + r["status"] = j.Status + if j.Transition != nil { + r["transition"] = (*j.Transition) + } + + return r +} + +// ToMap encodes the struct to a value map +func (j DeleteMarkerReplication) ToMap() map[string]any { + r := make(map[string]any) + r["status"] = j.Status + + return r +} + +// ToMap encodes the struct to a value map +func (j DeleteReplication) ToMap() map[string]any { + r := make(map[string]any) + r["status"] = j.Status + + return r +} + +// ToMap encodes the struct to a value map +func (j ExistingObjectReplication) ToMap() map[string]any { + r := make(map[string]any) + r["status"] = j.Status + + return r +} + +// ToMap encodes the struct to a value map +func (j LifecycleAllVersionsExpiration) ToMap() map[string]any { + r := make(map[string]any) + r["days"] = j.Days + r["deleteMarker"] = j.DeleteMarker + + return r +} + +// ToMap encodes the struct to a value map +func (j 
LifecycleDelMarkerExpiration) ToMap() map[string]any { + r := make(map[string]any) + r["days"] = j.Days + + return r +} + +// ToMap encodes the struct to a value map +func (j LifecycleExpiration) ToMap() map[string]any { + r := make(map[string]any) + r["date"] = j.Date + r["days"] = j.Days + r["expiredObjectAllVersions"] = j.DeleteAll + r["expiredObjectDeleteMarker"] = j.DeleteMarker + + return r +} + +// ToMap encodes the struct to a value map +func (j LifecycleFilter) ToMap() map[string]any { + r := make(map[string]any) + if j.And != nil { + r["and"] = (*j.And) + } + r["objectSizeGreaterThan"] = j.ObjectSizeGreaterThan + r["objectSizeLessThan"] = j.ObjectSizeLessThan + r["prefix"] = j.Prefix + if j.Tag != nil { + r["tag"] = (*j.Tag) + } + + return r +} + +// ToMap encodes the struct to a value map +func (j LifecycleFilterAnd) ToMap() map[string]any { + r := make(map[string]any) + r["objectSizeGreaterThan"] = j.ObjectSizeGreaterThan + r["objectSizeLessThan"] = j.ObjectSizeLessThan + r["prefix"] = j.Prefix + j_Tags := make([]any, len(j.Tags)) + for i, j_Tags_v := range j.Tags { + j_Tags[i] = j_Tags_v + } + r["tags"] = j_Tags + + return r +} + +// ToMap encodes the struct to a value map +func (j LifecycleNoncurrentVersionExpiration) ToMap() map[string]any { + r := make(map[string]any) + r["newerNoncurrentVersions"] = j.NewerNoncurrentVersions + r["noncurrentDays"] = j.NoncurrentDays + + return r +} + +// ToMap encodes the struct to a value map +func (j LifecycleNoncurrentVersionTransition) ToMap() map[string]any { + r := make(map[string]any) + r["newerNoncurrentVersions"] = j.NewerNoncurrentVersions + r["noncurrentDays"] = j.NoncurrentDays + r["storageClass"] = j.StorageClass + + return r +} + +// ToMap encodes the struct to a value map +func (j LifecycleTransition) ToMap() map[string]any { + r := make(map[string]any) + r["date"] = j.Date + r["days"] = j.Days + r["storageClass"] = j.StorageClass + + return r +} + +// ToMap encodes the struct to a value map +func (j ListStorageObjectsOptions) ToMap() map[string]any { + r := make(map[string]any) + r = utils.MergeMap(r, j.StorageBucketArguments.ToMap()) + r["maxKeys"] = j.MaxKeys + r["prefix"] = j.Prefix + r["recursive"] = j.Recursive + r["startAfter"] = j.StartAfter + r["withMetadata"] = j.WithMetadata + r["withVersions"] = j.WithVersions + + return r +} + +// ToMap encodes the struct to a value map +func (j NotificationCommonConfig) ToMap() map[string]any { + r := make(map[string]any) + r["arn"] = j.Arn + r["event"] = j.Events + if j.Filter != nil { + r["filter"] = (*j.Filter) + } + r["id"] = j.ID + + return r +} + +// ToMap encodes the struct to a value map +func (j NotificationConfig) ToMap() map[string]any { + r := make(map[string]any) + j_LambdaConfigs := make([]any, len(j.LambdaConfigs)) + for i, j_LambdaConfigs_v := range j.LambdaConfigs { + j_LambdaConfigs[i] = j_LambdaConfigs_v + } + r["cloudFunctionConfigurations"] = j_LambdaConfigs + j_QueueConfigs := make([]any, len(j.QueueConfigs)) + for i, j_QueueConfigs_v := range j.QueueConfigs { + j_QueueConfigs[i] = j_QueueConfigs_v + } + r["queueConfigurations"] = j_QueueConfigs + j_TopicConfigs := make([]any, len(j.TopicConfigs)) + for i, j_TopicConfigs_v := range j.TopicConfigs { + j_TopicConfigs[i] = j_TopicConfigs_v + } + r["topicConfigurations"] = j_TopicConfigs + + return r +} + +// ToMap encodes the struct to a value map +func (j NotificationFilter) ToMap() map[string]any { + r := make(map[string]any) + if j.S3Key != nil { + r["s3Key"] = (*j.S3Key) + } + + return r +} + +// ToMap 
encodes the struct to a value map +func (j NotificationFilterRule) ToMap() map[string]any { + r := make(map[string]any) + r["name"] = j.Name + r["value"] = j.Value + + return r +} + +// ToMap encodes the struct to a value map +func (j NotificationLambdaConfig) ToMap() map[string]any { + r := make(map[string]any) + r = utils.MergeMap(r, j.NotificationCommonConfig.ToMap()) + r["cloudFunction"] = j.Lambda + + return r +} + +// ToMap encodes the struct to a value map +func (j NotificationQueueConfig) ToMap() map[string]any { + r := make(map[string]any) + r = utils.MergeMap(r, j.NotificationCommonConfig.ToMap()) + r["queue"] = j.Queue + + return r +} + +// ToMap encodes the struct to a value map +func (j NotificationS3Key) ToMap() map[string]any { + r := make(map[string]any) + j_FilterRules := make([]any, len(j.FilterRules)) + for i, j_FilterRules_v := range j.FilterRules { + j_FilterRules[i] = j_FilterRules_v + } + r["filterRule"] = j_FilterRules + + return r +} + +// ToMap encodes the struct to a value map +func (j NotificationTopicConfig) ToMap() map[string]any { + r := make(map[string]any) + r = utils.MergeMap(r, j.NotificationCommonConfig.ToMap()) + r["topic"] = j.Topic + + return r +} + +// ToMap encodes the struct to a value map +func (j PresignedURLResponse) ToMap() map[string]any { + r := make(map[string]any) + r["expiredAt"] = j.ExpiredAt + r["url"] = j.URL + + return r +} + +// ToMap encodes the struct to a value map +func (j PutStorageObjectArguments) ToMap() map[string]any { + r := make(map[string]any) + r = utils.MergeMap(r, j.StorageBucketArguments.ToMap()) + r["object"] = j.Object + r["options"] = j.Options + + return r +} + +// ToMap encodes the struct to a value map +func (j PutStorageObjectOptions) ToMap() map[string]any { + r := make(map[string]any) + r["autoChecksum"] = j.AutoChecksum + r["cacheControl"] = j.CacheControl + r["checksum"] = j.Checksum + r["concurrentStreamParts"] = j.ConcurrentStreamParts + r["contentDisposition"] = j.ContentDisposition + r["contentEncoding"] = j.ContentEncoding + r["contentLanguage"] = j.ContentLanguage + r["contentType"] = j.ContentType + r["disableContentSha256"] = j.DisableContentSha256 + r["disableMultipart"] = j.DisableMultipart + r["expires"] = j.Expires + r["legalHold"] = j.LegalHold + r["mode"] = j.Mode + r["numThreads"] = j.NumThreads + r["partSize"] = j.PartSize + r["retainUntilDate"] = j.RetainUntilDate + r["sendContentMd5"] = j.SendContentMd5 + r["storageClass"] = j.StorageClass + r["userMetadata"] = j.UserMetadata + r["userTags"] = j.UserTags + r["websiteRedirectLocation"] = j.WebsiteRedirectLocation + + return r +} + +// ToMap encodes the struct to a value map +func (j RemoveStorageObjectError) ToMap() map[string]any { + r := make(map[string]any) + r["error"] = j.Error + r["objectName"] = j.ObjectName + r["versionId"] = j.VersionID + + return r +} + +// ToMap encodes the struct to a value map +func (j ReplicaModifications) ToMap() map[string]any { + r := make(map[string]any) + r["status"] = j.Status + + return r +} + +// ToMap encodes the struct to a value map +func (j ServerSideEncryptionConfiguration) ToMap() map[string]any { + r := make(map[string]any) + j_Rules := make([]any, len(j.Rules)) + for i, j_Rules_v := range j.Rules { + j_Rules[i] = j_Rules_v + } + r["rules"] = j_Rules + + return r +} + +// ToMap encodes the struct to a value map +func (j ServerSideEncryptionRule) ToMap() map[string]any { + r := make(map[string]any) + r["apply"] = j.Apply + + return r +} + +// ToMap encodes the struct to a value map +func (j 
SetStorageObjectLockConfig) ToMap() map[string]any { + r := make(map[string]any) + r["mode"] = j.Mode + r["unit"] = j.Unit + r["validity"] = j.Validity + + return r +} + +// ToMap encodes the struct to a value map +func (j SourceSelectionCriteria) ToMap() map[string]any { + r := make(map[string]any) + if j.ReplicaModifications != nil { + r["replicaModifications"] = (*j.ReplicaModifications) + } + + return r +} + +// ToMap encodes the struct to a value map +func (j StorageApplySSEByDefault) ToMap() map[string]any { + r := make(map[string]any) + r["kmsMasterKeyId"] = j.KmsMasterKeyID + r["sseAlgorithm"] = j.SSEAlgorithm + + return r +} + +// ToMap encodes the struct to a value map +func (j StorageBucketArguments) ToMap() map[string]any { + r := make(map[string]any) + r["bucket"] = j.Bucket + r["clientId"] = j.ClientID + + return r +} + +// ToMap encodes the struct to a value map +func (j StorageBucketInfo) ToMap() map[string]any { + r := make(map[string]any) + r["creationDate"] = j.CreationDate + r["name"] = j.Name + + return r +} + +// ToMap encodes the struct to a value map +func (j StorageBucketVersioningConfiguration) ToMap() map[string]any { + r := make(map[string]any) + r["excludeFolders"] = j.ExcludeFolders + r["excludedPrefixes"] = j.ExcludedPrefixes + r["mfaDelete"] = j.MFADelete + r["status"] = j.Status + + return r +} + +// ToMap encodes the struct to a value map +func (j StorageCopyDestOptions) ToMap() map[string]any { + r := make(map[string]any) + r["bucket"] = j.Bucket + r["legalHold"] = j.LegalHold + r["mode"] = j.Mode + r["object"] = j.Object + r["replaceMetadata"] = j.ReplaceMetadata + r["replaceTags"] = j.ReplaceTags + r["retainUntilDate"] = j.RetainUntilDate + r["size"] = j.Size + r["userMetadata"] = j.UserMetadata + r["userTags"] = j.UserTags + + return r +} + +// ToMap encodes the struct to a value map +func (j StorageCopySrcOptions) ToMap() map[string]any { + r := make(map[string]any) + r["bucket"] = j.Bucket + r["end"] = j.End + r["matchETag"] = j.MatchETag + r["matchModifiedSince"] = j.MatchModifiedSince + r["matchRange"] = j.MatchRange + r["matchUnmodifiedSince"] = j.MatchUnmodifiedSince + r["noMatchETag"] = j.NoMatchETag + r["object"] = j.Object + r["start"] = j.Start + r["versionId"] = j.VersionID + + return r +} + +// ToMap encodes the struct to a value map +func (j StorageGrant) ToMap() map[string]any { + r := make(map[string]any) + if j.Grantee != nil { + r["grantee"] = (*j.Grantee) + } + r["permission"] = j.Permission + + return r +} + +// ToMap encodes the struct to a value map +func (j StorageGrantee) ToMap() map[string]any { + r := make(map[string]any) + r["displayName"] = j.DisplayName + r["id"] = j.ID + r["uri"] = j.URI + + return r +} + +// ToMap encodes the struct to a value map +func (j StorageObject) ToMap() map[string]any { + r := make(map[string]any) + r = utils.MergeMap(r, j.StorageObjectChecksum.ToMap()) + r["bucket"] = j.Bucket + r["contentType"] = j.ContentType + r["etag"] = j.ETag + r["expiration"] = j.Expiration + r["expirationRuleId"] = j.ExpirationRuleID + r["expires"] = j.Expires + j_Grant := make([]any, len(j.Grant)) + for i, j_Grant_v := range j.Grant { + j_Grant[i] = j_Grant_v + } + r["grant"] = j_Grant + r["isDeleteMarker"] = j.IsDeleteMarker + r["isLatest"] = j.IsLatest + r["lastModified"] = j.LastModified + r["metadata"] = j.Metadata + r["name"] = j.Name + if j.Owner != nil { + r["owner"] = (*j.Owner) + } + r["replicationReady"] = j.ReplicationReady + r["replicationStatus"] = j.ReplicationStatus + if j.Restore != nil { + r["restore"] = 
(*j.Restore)
+	}
+	r["size"] = j.Size
+	r["storageClass"] = j.StorageClass
+	r["userMetadata"] = j.UserMetadata
+	r["userTagCount"] = j.UserTagCount
+	r["userTags"] = j.UserTags
+	r["versionId"] = j.VersionID
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageObjectAttributePart) ToMap() map[string]any {
+	r := make(map[string]any)
+	r = utils.MergeMap(r, j.StorageObjectChecksum.ToMap())
+	r["partNumber"] = j.PartNumber
+	r["size"] = j.Size
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageObjectAttributes) ToMap() map[string]any {
+	r := make(map[string]any)
+	r = utils.MergeMap(r, j.StorageObjectAttributesResponse.ToMap())
+	r["lastModified"] = j.LastModified
+	r["versionId"] = j.VersionID
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageObjectAttributesResponse) ToMap() map[string]any {
+	r := make(map[string]any)
+	r["checksum"] = j.Checksum
+	r["etag"] = j.ETag
+	r["objectParts"] = j.ObjectParts
+	r["objectSize"] = j.ObjectSize
+	r["storageClass"] = j.StorageClass
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageObjectChecksum) ToMap() map[string]any {
+	r := make(map[string]any)
+	r["checksumCrc32"] = j.ChecksumCRC32
+	r["checksumCrc32C"] = j.ChecksumCRC32C
+	r["checksumCrc64Nvme"] = j.ChecksumCRC64NVME
+	r["checksumSha1"] = j.ChecksumSHA1
+	r["checksumSha256"] = j.ChecksumSHA256
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageObjectLockConfig) ToMap() map[string]any {
+	r := make(map[string]any)
+	r = utils.MergeMap(r, j.SetStorageObjectLockConfig.ToMap())
+	r["objectLock"] = j.ObjectLock
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageObjectMultipartInfo) ToMap() map[string]any {
+	r := make(map[string]any)
+	r["initiated"] = j.Initiated
+	r["key"] = j.Key
+	r["size"] = j.Size
+	r["storageClass"] = j.StorageClass
+	r["uploadId"] = j.UploadID
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageObjectParts) ToMap() map[string]any {
+	r := make(map[string]any)
+	r["isTruncated"] = j.IsTruncated
+	r["maxParts"] = j.MaxParts
+	r["nextPartNumberMarker"] = j.NextPartNumberMarker
+	r["partNumberMarker"] = j.PartNumberMarker
+	j_Parts := make([]any, len(j.Parts))
+	for i, j_Parts_v := range j.Parts {
+		if j_Parts_v != nil {
+			j_Parts[i] = (*j_Parts_v)
+		}
+	}
+	r["parts"] = j_Parts
+	r["partsCount"] = j.PartsCount
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageOwner) ToMap() map[string]any {
+	r := make(map[string]any)
+	r["id"] = j.ID
+	r["name"] = j.DisplayName
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageReplicationConfig) ToMap() map[string]any {
+	r := make(map[string]any)
+	r["role"] = j.Role
+	j_Rules := make([]any, len(j.Rules))
+	for i, j_Rules_v := range j.Rules {
+		j_Rules[i] = j_Rules_v
+	}
+	r["rules"] = j_Rules
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageReplicationDestination) ToMap() map[string]any {
+	r := make(map[string]any)
+	r["bucket"] = j.Bucket
+	r["storageClass"] = j.StorageClass
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageReplicationFilter) ToMap() map[string]any {
+	r := make(map[string]any)
+	if j.And != nil {
+		r["and"] = (*j.And)
+	}
+	r["prefix"] = j.Prefix
+	if j.Tag != nil {
+		r["tag"] = (*j.Tag)
+	}
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageReplicationFilterAnd) ToMap() map[string]any {
+	r := make(map[string]any)
+	r["prefix"] = j.Prefix
+	j_Tags := make([]any, len(j.Tags))
+	for i, j_Tags_v := range j.Tags {
+		j_Tags[i] = j_Tags_v
+	}
+	r["tag"] = j_Tags
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageReplicationRule) ToMap() map[string]any {
+	r := make(map[string]any)
+	if j.DeleteMarkerReplication != nil {
+		r["deleteMarkerReplication"] = (*j.DeleteMarkerReplication)
+	}
+	if j.DeleteReplication != nil {
+		r["deleteReplication"] = (*j.DeleteReplication)
+	}
+	if j.Destination != nil {
+		r["destination"] = (*j.Destination)
+	}
+	if j.ExistingObjectReplication != nil {
+		r["existingObjectReplication"] = (*j.ExistingObjectReplication)
+	}
+	r["filter"] = j.Filter
+	r["id"] = j.ID
+	r["priority"] = j.Priority
+	if j.SourceSelectionCriteria != nil {
+		r["sourceSelectionCriteria"] = (*j.SourceSelectionCriteria)
+	}
+	r["status"] = j.Status
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageRestoreInfo) ToMap() map[string]any {
+	r := make(map[string]any)
+	r["expiryTime"] = j.ExpiryTime
+	r["ongoingRestore"] = j.OngoingRestore
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageTag) ToMap() map[string]any {
+	r := make(map[string]any)
+	r["key"] = j.Key
+	r["value"] = j.Value
+
+	return r
+}
+
+// ToMap encodes the struct to a value map
+func (j StorageUploadInfo) ToMap() map[string]any {
+	r := make(map[string]any)
+	r = utils.MergeMap(r, j.StorageObjectChecksum.ToMap())
+	r["bucket"] = j.Bucket
+	r["etag"] = j.ETag
+	r["expiration"] = j.Expiration
+	r["expirationRuleId"] = j.ExpirationRuleID
+	r["lastModified"] = j.LastModified
+	r["location"] = j.Location
+	r["name"] = j.Name
+	r["size"] = j.Size
+	r["versionId"] = j.VersionID
+
+	return r
+}
+
+// ScalarName get the schema name of the scalar
+func (j ChecksumType) ScalarName() string {
+	return "ChecksumType"
+}
+
+const (
+	ChecksumTypeSha256 ChecksumType = "SHA256"
+	ChecksumTypeSha1 ChecksumType = "SHA1"
+	ChecksumTypeCrc32 ChecksumType = "CRC32"
+	ChecksumTypeCrc32C ChecksumType = "CRC32C"
+	ChecksumTypeCrc64Nvme ChecksumType = "CRC64NVME"
+	ChecksumTypeFullObjectCrc32 ChecksumType = "FullObjectCRC32"
+	ChecksumTypeFullObjectCrc32C ChecksumType = "FullObjectCRC32C"
+	ChecksumTypeNone ChecksumType = "None"
+)
+
+var enumValues_ChecksumType = []ChecksumType{ChecksumTypeSha256, ChecksumTypeSha1, ChecksumTypeCrc32, ChecksumTypeCrc32C, ChecksumTypeCrc64Nvme, ChecksumTypeFullObjectCrc32, ChecksumTypeFullObjectCrc32C, ChecksumTypeNone}
+
+// ParseChecksumType parses a ChecksumType enum from string
+func ParseChecksumType(input string) (ChecksumType, error) {
+	result := ChecksumType(input)
+	if !slices.Contains(enumValues_ChecksumType, result) {
+		return ChecksumType(""), errors.New("failed to parse ChecksumType, expect one of [SHA256, SHA1, CRC32, CRC32C, CRC64NVME, FullObjectCRC32, FullObjectCRC32C, None]")
+	}
+
+	return result, nil
+}
+
+// IsValid checks if the value is invalid
+func (j ChecksumType) IsValid() bool {
+	return slices.Contains(enumValues_ChecksumType, j)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+// UnmarshalJSON implements json.Unmarshaler. +func (j *ChecksumType) UnmarshalJSON(b []byte) error { + var rawValue string + if err := json.Unmarshal(b, &rawValue); err != nil { + return err + } + + value, err := ParseChecksumType(rawValue) + if err != nil { + return err + } + + *j = value + return nil +} + +// FromValue decodes the scalar from an unknown value +func (s *ChecksumType) FromValue(value any) error { + valueStr, err := utils.DecodeNullableString(value) + if err != nil { + return err + } + if valueStr == nil { + return nil + } + result, err := ParseChecksumType(*valueStr) + if err != nil { + return err + } + + *s = result + return nil +} + +// ScalarName gets the schema name of the scalar +func (j StorageClientID) ScalarName() string { + return "StorageClientID" +} + +// ScalarName gets the schema name of the scalar +func (j StorageLegalHoldStatus) ScalarName() string { + return "StorageLegalHoldStatus" +} + +const ( + StorageLegalHoldStatusOn StorageLegalHoldStatus = "ON" + StorageLegalHoldStatusOff StorageLegalHoldStatus = "OFF" +) + +var enumValues_StorageLegalHoldStatus = []StorageLegalHoldStatus{StorageLegalHoldStatusOn, StorageLegalHoldStatusOff} + +// ParseStorageLegalHoldStatus parses a StorageLegalHoldStatus enum from string +func ParseStorageLegalHoldStatus(input string) (StorageLegalHoldStatus, error) { + result := StorageLegalHoldStatus(input) + if !slices.Contains(enumValues_StorageLegalHoldStatus, result) { + return StorageLegalHoldStatus(""), errors.New("failed to parse StorageLegalHoldStatus, expect one of [ON, OFF]") + } + + return result, nil +} + +// IsValid checks if the value is valid +func (j StorageLegalHoldStatus) IsValid() bool { + return slices.Contains(enumValues_StorageLegalHoldStatus, j) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *StorageLegalHoldStatus) UnmarshalJSON(b []byte) error { + var rawValue string + if err := json.Unmarshal(b, &rawValue); err != nil { + return err + } + + value, err := ParseStorageLegalHoldStatus(rawValue) + if err != nil { + return err + } + + *j = value + return nil +} + +// FromValue decodes the scalar from an unknown value +func (s *StorageLegalHoldStatus) FromValue(value any) error { + valueStr, err := utils.DecodeNullableString(value) + if err != nil { + return err + } + if valueStr == nil { + return nil + } + result, err := ParseStorageLegalHoldStatus(*valueStr) + if err != nil { + return err + } + + *s = result + return nil +} + +// ScalarName gets the schema name of the scalar +func (j StorageObjectReplicationStatus) ScalarName() string { + return "StorageObjectReplicationStatus" +} + +const ( + StorageObjectReplicationStatusCompleted StorageObjectReplicationStatus = "COMPLETED" + StorageObjectReplicationStatusPending StorageObjectReplicationStatus = "PENDING" + StorageObjectReplicationStatusFailed StorageObjectReplicationStatus = "FAILED" + StorageObjectReplicationStatusReplica StorageObjectReplicationStatus = "REPLICA" +) + +var enumValues_StorageObjectReplicationStatus = []StorageObjectReplicationStatus{StorageObjectReplicationStatusCompleted, StorageObjectReplicationStatusPending, StorageObjectReplicationStatusFailed, StorageObjectReplicationStatusReplica} + +// ParseStorageObjectReplicationStatus parses a StorageObjectReplicationStatus enum from string +func ParseStorageObjectReplicationStatus(input string) (StorageObjectReplicationStatus, error) { + result := StorageObjectReplicationStatus(input) + if !slices.Contains(enumValues_StorageObjectReplicationStatus, result) { + return StorageObjectReplicationStatus(""), errors.New("failed to 
parse StorageObjectReplicationStatus, expect one of [COMPLETED, PENDING, FAILED, REPLICA]") + } + + return result, nil +} + +// IsValid checks if the value is valid +func (j StorageObjectReplicationStatus) IsValid() bool { + return slices.Contains(enumValues_StorageObjectReplicationStatus, j) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *StorageObjectReplicationStatus) UnmarshalJSON(b []byte) error { + var rawValue string + if err := json.Unmarshal(b, &rawValue); err != nil { + return err + } + + value, err := ParseStorageObjectReplicationStatus(rawValue) + if err != nil { + return err + } + + *j = value + return nil +} + +// FromValue decodes the scalar from an unknown value +func (s *StorageObjectReplicationStatus) FromValue(value any) error { + valueStr, err := utils.DecodeNullableString(value) + if err != nil { + return err + } + if valueStr == nil { + return nil + } + result, err := ParseStorageObjectReplicationStatus(*valueStr) + if err != nil { + return err + } + + *s = result + return nil +} + +// ScalarName gets the schema name of the scalar +func (j StorageReplicationRuleStatus) ScalarName() string { + return "StorageReplicationRuleStatus" +} + +const ( + StorageReplicationRuleStatusEnabled StorageReplicationRuleStatus = "Enabled" + StorageReplicationRuleStatusDisabled StorageReplicationRuleStatus = "Disabled" +) + +var enumValues_StorageReplicationRuleStatus = []StorageReplicationRuleStatus{StorageReplicationRuleStatusEnabled, StorageReplicationRuleStatusDisabled} + +// ParseStorageReplicationRuleStatus parses a StorageReplicationRuleStatus enum from string +func ParseStorageReplicationRuleStatus(input string) (StorageReplicationRuleStatus, error) { + result := StorageReplicationRuleStatus(input) + if !slices.Contains(enumValues_StorageReplicationRuleStatus, result) { + return StorageReplicationRuleStatus(""), errors.New("failed to parse StorageReplicationRuleStatus, expect one of [Enabled, Disabled]") + } + + return result, nil +} + +// IsValid checks if the value is valid +func (j StorageReplicationRuleStatus) IsValid() bool { + return slices.Contains(enumValues_StorageReplicationRuleStatus, j) +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *StorageReplicationRuleStatus) UnmarshalJSON(b []byte) error { + var rawValue string + if err := json.Unmarshal(b, &rawValue); err != nil { + return err + } + + value, err := ParseStorageReplicationRuleStatus(rawValue) + if err != nil { + return err + } + + *j = value + return nil +} + +// FromValue decodes the scalar from an unknown value +func (s *StorageReplicationRuleStatus) FromValue(value any) error { + valueStr, err := utils.DecodeNullableString(value) + if err != nil { + return err + } + if valueStr == nil { + return nil + } + result, err := ParseStorageReplicationRuleStatus(*valueStr) + if err != nil { + return err + } + + *s = result + return nil +} + +// ScalarName gets the schema name of the scalar +func (j StorageRetentionMode) ScalarName() string { + return "StorageRetentionMode" +} + +const ( + StorageRetentionModeGovernance StorageRetentionMode = "GOVERNANCE" + StorageRetentionModeCompliance StorageRetentionMode = "COMPLIANCE" +) + +var enumValues_StorageRetentionMode = []StorageRetentionMode{StorageRetentionModeGovernance, StorageRetentionModeCompliance} + +// ParseStorageRetentionMode parses a StorageRetentionMode enum from string +func ParseStorageRetentionMode(input string) (StorageRetentionMode, error) { + result := StorageRetentionMode(input) + if !slices.Contains(enumValues_StorageRetentionMode, result) { + return StorageRetentionMode(""), errors.New("failed to parse StorageRetentionMode, expect one of [GOVERNANCE, COMPLIANCE]") + } + + return result, nil +} + +// IsValid checks if the value is valid +func (j StorageRetentionMode) IsValid() bool { + return slices.Contains(enumValues_StorageRetentionMode, j) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *StorageRetentionMode) UnmarshalJSON(b []byte) error { + var rawValue string + if err := json.Unmarshal(b, &rawValue); err != nil { + return err + } + + value, err := ParseStorageRetentionMode(rawValue) + if err != nil { + return err + } + + *j = value + return nil +} + +// FromValue decodes the scalar from an unknown value +func (s *StorageRetentionMode) FromValue(value any) error { + valueStr, err := utils.DecodeNullableString(value) + if err != nil { + return err + } + if valueStr == nil { + return nil + } + result, err := ParseStorageRetentionMode(*valueStr) + if err != nil { + return err + } + + *s = result + return nil +} + +// ScalarName gets the schema name of the scalar +func (j StorageRetentionValidityUnit) ScalarName() string { + return "StorageRetentionValidityUnit" +} + +const ( + StorageRetentionValidityUnitDays StorageRetentionValidityUnit = "DAYS" + StorageRetentionValidityUnitYears StorageRetentionValidityUnit = "YEARS" +) + +var enumValues_StorageRetentionValidityUnit = []StorageRetentionValidityUnit{StorageRetentionValidityUnitDays, StorageRetentionValidityUnitYears} + +// ParseStorageRetentionValidityUnit parses a StorageRetentionValidityUnit enum from string +func ParseStorageRetentionValidityUnit(input string) (StorageRetentionValidityUnit, error) { + result := StorageRetentionValidityUnit(input) + if !slices.Contains(enumValues_StorageRetentionValidityUnit, result) { + return StorageRetentionValidityUnit(""), errors.New("failed to parse StorageRetentionValidityUnit, expect one of [DAYS, YEARS]") + } + + return result, nil +} + +// IsValid checks if the value is valid +func (j StorageRetentionValidityUnit) IsValid() bool { + return slices.Contains(enumValues_StorageRetentionValidityUnit, j) +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *StorageRetentionValidityUnit) UnmarshalJSON(b []byte) error { + var rawValue string + if err := json.Unmarshal(b, &rawValue); err != nil { + return err + } + + value, err := ParseStorageRetentionValidityUnit(rawValue) + if err != nil { + return err + } + + *j = value + return nil +} + +// FromValue decodes the scalar from an unknown value +func (s *StorageRetentionValidityUnit) FromValue(value any) error { + valueStr, err := utils.DecodeNullableString(value) + if err != nil { + return err + } + if valueStr == nil { + return nil + } + result, err := ParseStorageRetentionValidityUnit(*valueStr) + if err != nil { + return err + } + + *s = result + return nil +} diff --git a/connector/storage/common/types.go b/connector/storage/common/types.go new file mode 100644 index 0000000..804a586 --- /dev/null +++ b/connector/storage/common/types.go @@ -0,0 +1,54 @@ +package common + +import ( + "fmt" + "slices" + + "github.com/invopop/jsonschema" +) + +// StorageClientID the storage client ID enum. +// @scalar StorageClientID string +type StorageClientID string + +// StorageProviderType represents a storage provider type enum. +type StorageProviderType string + +const ( + S3 StorageProviderType = "s3" + GoogleStorage StorageProviderType = "gs" +) + +var enumValues_StorageProviderType = []StorageProviderType{ + S3, GoogleStorage, +} + +// ParseStorageProviderType parses the StorageProviderType from string. +func ParseStorageProviderType(input string) (StorageProviderType, error) { + result := StorageProviderType(input) + if !slices.Contains(enumValues_StorageProviderType, result) { + return "", fmt.Errorf("invalid StorageProviderType, expected one of %v, got: %s", enumValues_StorageProviderType, input) + } + + return result, nil +} + +// Validate checks if the provider type is valid. +func (spt StorageProviderType) Validate() error { + _, err := ParseStorageProviderType(string(spt)) + + return err +} + +// JSONSchema is used to generate a custom jsonschema. +func (spt StorageProviderType) JSONSchema() *jsonschema.Schema { + enumValues := make([]any, len(enumValues_StorageProviderType)) + for i, item := range enumValues_StorageProviderType { + enumValues[i] = string(item) + } + + return &jsonschema.Schema{ + Type: "string", + Enum: enumValues, + } +} diff --git a/connector/storage/config.go b/connector/storage/config.go new file mode 100644 index 0000000..31a7377 --- /dev/null +++ b/connector/storage/config.go @@ -0,0 +1,449 @@ +package storage + +import ( + "context" + "errors" + "fmt" + "log/slog" + "net/url" + "slices" + "strconv" + "strings" + "time" + + "github.com/hasura/ndc-sdk-go/schema" + "github.com/hasura/ndc-sdk-go/utils" + "github.com/hasura/ndc-storage/connector/storage/common" + "github.com/hasura/ndc-storage/connector/storage/minio" + "github.com/invopop/jsonschema" +) + +var ( + errRequireAccessKeyID = errors.New("accessKeyId is required") + errRequireSecretAccessKey = errors.New("secretAccessKey is required") + errRequireStorageEndpoint = errors.New("endpoint is required") +) + +// Client wraps the storage client with additional information. +type Client struct { + id common.StorageClientID + defaultBucket string + defaultPresignedExpiry *time.Duration + allowedBuckets []string + + common.StorageClient +} + +// ValidateBucket checks if the bucket name is valid, or returns the default bucket if empty. 
+func (c *Client) ValidateBucket(key string) (string, error) { + if key != "" { + if key == c.defaultBucket || len(c.allowedBuckets) == 0 || slices.Contains(c.allowedBuckets, key) { + return key, nil + } + + return "", schema.UnprocessableContentError(fmt.Sprintf("you are not allowed to access `%s` bucket, client id `%s`", key, c.id), nil) + } + + if c.defaultBucket == "" { + return "", schema.UnprocessableContentError("bucket name is required", nil) + } + + return c.defaultBucket, nil +} + +// EnvStorageProviderType represents the env configuration for the storage provider type. +type EnvStorageProviderType struct { + utils.EnvString `yaml:",inline"` +} + +// Validate checks if the configuration is valid. +func (espt EnvStorageProviderType) Validate() (common.StorageProviderType, error) { + rawProviderType, err := espt.EnvString.GetOrDefault("") + if err != nil { + return "", err + } + + providerType, err := common.ParseStorageProviderType(rawProviderType) + if err != nil { + return "", err + } + + return providerType, nil +} + +// JSONSchema is used to generate a custom jsonschema. +func (espt EnvStorageProviderType) JSONSchema() *jsonschema.Schema { + result := &jsonschema.Schema{ + Type: "object", + Properties: jsonschema.NewProperties(), + AnyOf: []*jsonschema.Schema{ + { + Required: []string{"value"}, + }, + { + Required: []string{"env"}, + }, + }, + } + + result.Properties.Set("env", &jsonschema.Schema{ + Type: "string", + }) + + result.Properties.Set("value", common.StorageProviderType("").JSONSchema()) + + return result +} + +// ClientConfig represents the raw configuration of a storage provider client. +type ClientConfig struct { + // The unique identity of a client. Use this setting if there are many configured clients. + ID string `json:"id,omitempty" yaml:"id,omitempty"` + // Cloud provider type of the storage client. + Type EnvStorageProviderType `json:"type" yaml:"type"` + // Default bucket name to be set if the user doesn't specify any bucket. + DefaultBucket utils.EnvString `json:"defaultBucket" yaml:"defaultBucket"` + // Endpoint of the storage server. Required for other S3 compatible services such as MinIO, Cloudflare R2, DigitalOcean Spaces, etc... + Endpoint *utils.EnvString `json:"endpoint,omitempty" yaml:"endpoint,omitempty"` + // The public host to be used for presigned URL generation. + PublicHost *utils.EnvString `json:"publicHost,omitempty" yaml:"publicHost,omitempty"` + // Optional region. + Region *utils.EnvString `json:"region,omitempty" jsonschema:"nullable" yaml:"region,omitempty"` + // Maximum number of retry times. + MaxRetries *int `json:"maxRetries,omitempty" jsonschema:"min=1,default=10" yaml:"maxRetries,omitempty"` + // The default expiry for presigned URL generation. The maximum expiry is 604800 seconds (i.e. 7 days) and minimum is 1 second. + DefaultPresignedExpiry *string `json:"defaultPresignedExpiry,omitempty" jsonschema:"pattern=[0-9]+(s|m|h),default=24h" yaml:"defaultPresignedExpiry,omitempty"` + // Authentication credentials. + Authentication AuthCredentials `json:"authentication" yaml:"authentication"` + // TrailingHeaders indicates server support of trailing headers. + // Only supported for v4 signatures. + TrailingHeaders bool `json:"trailingHeaders,omitempty" yaml:"trailingHeaders,omitempty"` + // Allowed buckets. This setting prevents users from accessing buckets and objects outside the list. + // However, it's recommended to restrict the permissions for the IAM credentials. + // This setting is useful to let the connector know which buckets belong to this client. + // The empty value means all buckets are allowed. The storage server will handle the validation. + AllowedBuckets []string `json:"allowedBuckets,omitempty" yaml:"allowedBuckets,omitempty"` +}
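A minimal sketch of how ValidateBucket and the AllowedBuckets setting interact (inside package storage; the bucket names are illustrative, not part of the connector):

	c := &Client{defaultBucket: "media", allowedBuckets: []string{"media", "backups"}}

	b1, _ := c.ValidateBucket("")         // "media": empty input falls back to the default bucket
	b2, _ := c.ValidateBucket("backups")  // "backups": explicitly allowed
	_, err := c.ValidateBucket("private") // UnprocessableContentError: not in allowedBuckets
	_, _, _ = b1, b2, err

With an empty allowedBuckets list, any non-empty bucket name passes through and validation is deferred to the storage server.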
 +// Validate checks if the configuration is valid. +func (sc ClientConfig) Validate() error { + providerType, err := sc.Type.Validate() + if err != nil { + return fmt.Errorf("type: %w", err) + } + + switch providerType { + case common.S3, common.GoogleStorage: + _, err := sc.toMinioConfig(providerType) + if err != nil && !errors.Is(err, errRequireAccessKeyID) && !errors.Is(err, errRequireSecretAccessKey) && !errors.Is(err, errRequireStorageEndpoint) { + return err + } + } + + return nil +} + +// ToStorageClient validates and creates the storage client from config. +func (sc ClientConfig) ToStorageClient(ctx context.Context, logger *slog.Logger) (common.StorageClient, error) { + providerType, err := sc.Type.Validate() + if err != nil { + return nil, fmt.Errorf("type: %w", err) + } + + switch providerType { + case common.S3, common.GoogleStorage: + return sc.toMinioClient(ctx, logger, providerType) + } + + return nil, errors.New("unsupported storage client: " + string(providerType)) +} + +func (sc ClientConfig) toMinioClient(ctx context.Context, logger *slog.Logger, providerType common.StorageProviderType) (common.StorageClient, error) { + config, err := sc.toMinioConfig(providerType) + if err != nil { + return nil, err + } + + return minio.New(ctx, config, logger) +} + +func (sc ClientConfig) toMinioConfig(providerType common.StorageProviderType) (*minio.ClientConfig, error) { + endpoint, port, useSSL, err := sc.parseEndpoint() + if err != nil { + return nil, err + } + + if endpoint == "" { + switch providerType { + case common.S3: + endpoint = "s3.amazonaws.com" + useSSL = true + case common.GoogleStorage: + endpoint = "storage.googleapis.com" + useSSL = true + default: + return nil, errRequireStorageEndpoint + } + } + + result := &minio.ClientConfig{ + Type: providerType, + Endpoint: endpoint, + Secure: useSSL, + Port: port, + TrailingHeaders: sc.TrailingHeaders, + } + + creds, err := sc.Authentication.toMinioAuthConfig() + if err != nil { + return nil, err + } + + result.AuthConfig = *creds + + if sc.MaxRetries != nil { + maxRetries := *sc.MaxRetries + if maxRetries <= -1 { + maxRetries = 1 + } + + result.MaxRetries = maxRetries + } + + if sc.PublicHost != nil { + publicHost, err := sc.PublicHost.GetOrDefault("") + if err != nil { + return nil, fmt.Errorf("publicHost: %w", err) + } + + if strings.HasPrefix(publicHost, "http") { + result.PublicHost, err = url.Parse(publicHost) + if err != nil { + return nil, fmt.Errorf("publicHost: %w", err) + } + } else { + result.PublicHost = &url.URL{ + Host: publicHost, + } + } + } + + if sc.Region != nil { + result.Region, err = sc.Region.GetOrDefault("") + if err != nil { + return nil, fmt.Errorf("region: %w", err) + } + } + + return result, nil +} + +func (sc ClientConfig) parseEndpoint() (string, int, bool, error) { + port := 80 + if sc.Endpoint == nil { + return "", port, false, nil + } + + var endpoint string + var useSSL bool + + rawEndpoint, err := sc.Endpoint.GetOrDefault("") + if err != nil { + return "", port, false, fmt.Errorf("endpoint: %w", err) + } + + if rawEndpoint == "" { + return endpoint, port, useSSL, nil + } + + endpointURL, err := url.Parse(rawEndpoint) + if err != nil { + return "", port, false, fmt.Errorf("invalid endpoint url: %w", err) + } + + 
if !strings.HasPrefix(endpointURL.Scheme, "http") { + return "", port, false, errors.New("invalid endpoint url http scheme: " + endpointURL.Scheme) + } + + endpoint = endpointURL.Host + + if endpointURL.Scheme == "https" { + useSSL = true + port = 443 + } + + rawPort := endpointURL.Port() + if rawPort != "" { + p, err := strconv.Atoi(rawPort) + if err != nil { + return "", 0, false, fmt.Errorf("invalid endpoint port: %s", rawPort) + } + + port = p + } + + return endpoint, port, useSSL, nil +} + +// AuthType represents the authentication type enum. +type AuthType string + +const ( + AuthTypeStatic AuthType = "static" + AuthTypeIAM AuthType = "iam" +) + +var enumValues_AuthType = []AuthType{ + AuthTypeStatic, AuthTypeIAM, +} + +// ParseAuthType parses the AuthType from string. +func ParseAuthType(input string) (AuthType, error) { + result := AuthType(input) + if !slices.Contains(enumValues_AuthType, result) { + return "", fmt.Errorf("invalid AuthType, expected one of %v, got: %s", enumValues_AuthType, input) + } + + return result, nil +} + +// Validate checks if the auth type is valid. +func (at AuthType) Validate() error { + _, err := ParseAuthType(string(at)) + + return err +} + +// AuthCredentials represents the authentication credentials information. +type AuthCredentials struct { + // The authentication type + Type AuthType `json:"type" yaml:"type"` + // Access Key ID. + AccessKeyID *utils.EnvString `json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty"` + // Secret Access Key. + SecretAccessKey *utils.EnvString `json:"secretAccessKey,omitempty" yaml:"secretAccessKey,omitempty"` + // Optional temporary session token credentials. Used for testing only. + // See https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html + SessionToken *utils.EnvString `json:"sessionToken,omitempty" yaml:"sessionToken,omitempty"` + // Custom endpoint to fetch IAM role credentials. + IAMAuthEndpoint *utils.EnvString `json:"iamAuthEndpoint,omitempty" yaml:"iamAuthEndpoint,omitempty"` +}
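For illustration, two JSON fragments that satisfy the oneOf schema produced by the JSONSchema method that follows (the environment variable names are placeholders, not part of the connector):

	// Static credentials: accessKeyId and secretAccessKey are required.
	const staticAuthJSON = `{
	  "type": "static",
	  "accessKeyId": {"env": "STORAGE_ACCESS_KEY_ID"},
	  "secretAccessKey": {"env": "STORAGE_SECRET_ACCESS_KEY"}
	}`

	// IAM role credentials: only the type is required; iamAuthEndpoint is optional.
	const iamAuthJSON = `{"type": "iam"}`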
 +// JSONSchema is used to generate a custom jsonschema. +func (ac AuthCredentials) JSONSchema() *jsonschema.Schema { + envStringRef := &jsonschema.Schema{ + Ref: "#/$defs/EnvString", + } + + staticProps := jsonschema.NewProperties() + staticProps.Set("type", &jsonschema.Schema{ + Type: "string", + Enum: []any{AuthTypeStatic}, + }) + staticProps.Set("accessKeyId", envStringRef) + staticProps.Set("secretAccessKey", envStringRef) + staticProps.Set("sessionToken", envStringRef) + + iamProps := jsonschema.NewProperties() + iamProps.Set("type", &jsonschema.Schema{ + Type: "string", + Enum: []any{AuthTypeIAM}, + }) + iamProps.Set("iamAuthEndpoint", envStringRef) + + return &jsonschema.Schema{ + OneOf: []*jsonschema.Schema{ + { + Type: "object", + Properties: staticProps, + Required: []string{"type", "accessKeyId", "secretAccessKey"}, + }, + { + Type: "object", + Properties: iamProps, + Required: []string{"type"}, + }, + }, + } +} + +func (ac AuthCredentials) toMinioAuthConfig() (*minio.AuthConfig, error) { + switch ac.Type { + case AuthTypeIAM: + return ac.parseIAMAuth() + case AuthTypeStatic: + return ac.parseStaticAccessIDSecret() + default: + return nil, fmt.Errorf("unsupported auth type %s", ac.Type) + } +} + +func (ac AuthCredentials) parseIAMAuth() (*minio.AuthConfig, error) { + var rawIAMEndpoint string + + // The endpoint is optional for IAM auth; guard against a nil pointer before reading it. + if ac.IAMAuthEndpoint != nil { + var err error + + rawIAMEndpoint, err = ac.IAMAuthEndpoint.GetOrDefault("") + if err != nil { + return nil, fmt.Errorf("iamAuthEndpoint: %w", err) + } + } + + if rawIAMEndpoint != "" { + iamEndpoint, err := url.Parse(rawIAMEndpoint) + if err != nil { + return nil, fmt.Errorf("iamAuthEndpoint: %w", err) + } + + if !strings.HasPrefix(iamEndpoint.Scheme, "http") { + return nil, errors.New("iamAuthEndpoint: invalid http scheme " + iamEndpoint.Scheme) + } + } + + return &minio.AuthConfig{ + UseIAMAuth: true, + IAMAuthEndpoint: rawIAMEndpoint, + }, nil +} + +func (ac AuthCredentials) parseStaticAccessIDSecret() (*minio.AuthConfig, error) { + if ac.AccessKeyID == nil { + return nil, errRequireAccessKeyID + } + + if ac.SecretAccessKey == nil { + return nil, errRequireSecretAccessKey + } + + accessKeyID, err := ac.AccessKeyID.GetOrDefault("") + if err != nil { + return nil, fmt.Errorf("accessKeyID: %w", err) + } + + if accessKeyID == "" { + return nil, errRequireAccessKeyID + } + + secretAccessKey, err := ac.SecretAccessKey.GetOrDefault("") + if err != nil { + return nil, fmt.Errorf("secretAccessKey: %w", err) + } + + if secretAccessKey == "" { + return nil, errRequireSecretAccessKey + } + + var sessionToken string + if ac.SessionToken != nil { + sessionToken, err = ac.SessionToken.GetOrDefault("") + if err != nil { + return nil, fmt.Errorf("sessionToken: %w", err) + } + } + + return &minio.AuthConfig{ + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + SessionToken: sessionToken, + }, nil +} + +// FormatTimestamp formats the Time value to string +func FormatTimestamp(value time.Time) string { + return value.Format(time.RFC3339) +} diff --git a/connector/storage/manager.go b/connector/storage/manager.go new file mode 100644 index 0000000..02ed350 --- /dev/null +++ b/connector/storage/manager.go @@ -0,0 +1,125 @@ +package storage + +import ( + "context" + "errors" + "fmt" + "log/slog" + "slices" + "strconv" + "time" + + "github.com/hasura/ndc-sdk-go/schema" + "github.com/hasura/ndc-storage/connector/storage/common" +) + +// Manager represents the high-level client that manages internal clients and configurations. +type Manager struct { + clients []Client +}
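A quick sketch of how the manager defined below is typically driven (the config slice and client ID are illustrative; configs would normally be decoded from the connector configuration file):

	manager, err := NewManager(ctx, configs, slog.Default())
	if err != nil {
		return err
	}

	// Empty client ID and bucket: resolves to the first client and its default bucket.
	client, bucket, err := manager.GetClientAndBucket(nil, "")

	// Explicit client ID: the bucket is validated against that client's allow list.
	id := common.StorageClientID("minio")
	client, bucket, err = manager.GetClientAndBucket(&id, "my-bucket")
	_, _, _ = client, bucket, err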
 +// NewManager creates a storage client manager instance. +func NewManager(ctx context.Context, configs []ClientConfig, logger *slog.Logger) (*Manager, error) { + if len(configs) == 0 { + return nil, errors.New("failed to initialize storage clients: config is empty") + } + + result := &Manager{ + clients: make([]Client, len(configs)), + } + + for i, config := range configs { + defaultBucket, err := config.DefaultBucket.GetOrDefault("") + if err != nil { + return nil, fmt.Errorf("failed to initialize storage client %d; defaultBucket: %w", i, err) + } + + client, err := config.ToStorageClient(ctx, logger) + if err != nil { + return nil, fmt.Errorf("failed to initialize storage client %d: %w", i, err) + } + + c := Client{ + id: common.StorageClientID(config.ID), + defaultBucket: defaultBucket, + allowedBuckets: config.AllowedBuckets, + StorageClient: client, + } + + if config.DefaultPresignedExpiry != nil { + presignedExpiry, err := time.ParseDuration(*config.DefaultPresignedExpiry) + if err != nil { + return nil, fmt.Errorf("defaultPresignedExpiry: %w", err) + } + + c.defaultPresignedExpiry = &presignedExpiry + } + + if c.id == "" { + c.id = common.StorageClientID(strconv.Itoa(i)) + } + + result.clients[i] = c + } + + return result, nil +} + +// GetClient gets the inner client by key. +func (m *Manager) GetClient(clientID *common.StorageClientID) (*Client, bool) { + if clientID == nil || *clientID == "" { + return &m.clients[0], true + } + + for _, c := range m.clients { + if c.id == *clientID { + return &c, true + } + } + + return nil, false +} + +// GetClientIDs gets all client IDs. +func (m *Manager) GetClientIDs() []string { + results := make([]string, len(m.clients)) + + for i, client := range m.clients { + results[i] = string(client.id) + } + + return results +} + +// GetClientAndBucket gets the inner client by client ID and bucket name. +func (m *Manager) GetClientAndBucket(clientID *common.StorageClientID, bucketName string) (*Client, string, error) { + hasClientID := clientID != nil && *clientID != "" + if !hasClientID && bucketName == "" { + client, _ := m.GetClient(nil) + + return client, client.defaultBucket, nil + } + + if hasClientID { + client, ok := m.GetClient(clientID) + if !ok { + return nil, "", schema.InternalServerError("client not found: "+string(*clientID), nil) + } + + bucketName, err := client.ValidateBucket(bucketName) + if err != nil { + return nil, "", err + } + + return client, bucketName, nil + } + + for _, c := range m.clients { + if c.defaultBucket == bucketName || slices.Contains(c.allowedBuckets, bucketName) { + return &c, bucketName, nil + } + } + + // return the first client by default + return &m.clients[0], bucketName, nil +} diff --git a/connector/storage/minio/bucket.go b/connector/storage/minio/bucket.go new file mode 100644 index 0000000..6107122 --- /dev/null +++ b/connector/storage/minio/bucket.go @@ -0,0 +1,504 @@ +package minio + +import ( + "context" + + "github.com/hasura/ndc-sdk-go/schema" + "github.com/hasura/ndc-storage/connector/storage/common" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/replication" + "github.com/minio/minio-go/v7/pkg/tags" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +// MakeBucket creates a new bucket. 
+func (mc *Client) MakeBucket(ctx context.Context, args *common.MakeStorageBucketOptions) error { + ctx, span := mc.startOtelSpan(ctx, "MakeBucket", args.Name) + defer span.End() + + err := mc.client.MakeBucket(ctx, args.Name, minio.MakeBucketOptions{ + Region: args.Region, + ObjectLocking: args.ObjectLocking, + }) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return serializeErrorResponse(err) + } + + return nil +} + +// ListBuckets lists all buckets. +func (mc *Client) ListBuckets(ctx context.Context) ([]common.StorageBucketInfo, error) { + ctx, span := mc.startOtelSpan(ctx, "ListBuckets", "") + defer span.End() + + bucketInfos, err := mc.client.ListBuckets(ctx) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return nil, serializeErrorResponse(err) + } + + results := make([]common.StorageBucketInfo, len(bucketInfos)) + for i, item := range bucketInfos { + results[i] = common.StorageBucketInfo{ + Name: item.Name, + CreationDate: item.CreationDate, + } + } + + span.SetAttributes(attribute.Int("storage.bucket_count", len(results))) + + return results, nil +} + +// BucketExists checks if a bucket exists. +func (mc *Client) BucketExists(ctx context.Context, bucketName string) (bool, error) { + ctx, span := mc.startOtelSpan(ctx, "BucketExists", bucketName) + defer span.End() + + existed, err := mc.client.BucketExists(ctx, bucketName) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return false, serializeErrorResponse(err) + } + + span.SetAttributes(attribute.Bool("storage.bucket_exist", existed)) + + return existed, nil +} + +// RemoveBucket removes a bucket; the bucket must be empty to be successfully removed. +func (mc *Client) RemoveBucket(ctx context.Context, bucketName string) error { + ctx, span := mc.startOtelSpan(ctx, "RemoveBucket", bucketName) + defer span.End() + + err := mc.client.RemoveBucket(ctx, bucketName) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return serializeErrorResponse(err) + } + + return nil +} + +// GetBucketTagging gets tags of a bucket. +func (mc *Client) GetBucketTagging(ctx context.Context, bucketName string) (map[string]string, error) { + ctx, span := mc.startOtelSpan(ctx, "GetBucketTagging", bucketName) + defer span.End() + + bucketTags, err := mc.client.GetBucketTagging(ctx, bucketName) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return nil, serializeErrorResponse(err) + } + + result := bucketTags.ToMap() + for key, value := range result { + span.SetAttributes(attribute.String("storage.bucket_tag."+key, value)) + } + + return result, nil +} + +// RemoveBucketTagging removes all tags on a bucket. +func (mc *Client) RemoveBucketTagging(ctx context.Context, bucketName string) error { + ctx, span := mc.startOtelSpan(ctx, "RemoveBucketTagging", bucketName) + defer span.End() + + err := mc.client.RemoveBucketTagging(ctx, bucketName) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return serializeErrorResponse(err) + } + + return nil +} + +// SetBucketTagging sets tags to a bucket. 
+func (mc *Client) SetBucketTagging(ctx context.Context, args *common.SetStorageBucketTaggingArguments) error { + ctx, span := mc.startOtelSpan(ctx, "SetBucketTagging", args.Bucket) + defer span.End() + + for key, value := range args.Tags { + span.SetAttributes(attribute.String("storage.bucket_tag."+key, value)) + } + + inputTags, err := tags.NewTags(args.Tags, false) + if err != nil { + span.SetStatus(codes.Error, "failed to convert minio tags") + span.RecordError(err) + + return schema.UnprocessableContentError(err.Error(), nil) + } + + err = mc.client.SetBucketTagging(ctx, args.Bucket, inputTags) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return serializeErrorResponse(err) + } + + return nil +} + +// GetBucketPolicy gets access permissions on a bucket or a prefix. +func (mc *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) { + ctx, span := mc.startOtelSpan(ctx, "GetBucketPolicy", bucketName) + defer span.End() + + result, err := mc.client.GetBucketPolicy(ctx, bucketName) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return "", serializeErrorResponse(err) + } + + return result, nil +} + +// GetBucketNotification gets notification configuration on a bucket. +func (mc *Client) GetBucketNotification(ctx context.Context, bucketName string) (*common.NotificationConfig, error) { + ctx, span := mc.startOtelSpan(ctx, "GetBucketNotification", bucketName) + defer span.End() + + result, err := mc.client.GetBucketNotification(ctx, bucketName) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return nil, serializeErrorResponse(err) + } + + return serializeBucketNotificationConfig(result), nil +} + +// SetBucketNotification sets a new bucket notification on a bucket. +func (mc *Client) SetBucketNotification(ctx context.Context, bucketName string, config common.NotificationConfig) error { + ctx, span := mc.startOtelSpan(ctx, "SetBucketNotification", bucketName) + defer span.End() + + input, err := validateBucketNotificationConfig(config) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return schema.UnprocessableContentError(err.Error(), nil) + } + + if err := mc.client.SetBucketNotification(ctx, bucketName, *input); err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return serializeErrorResponse(err) + } + + return nil +} + +// RemoveAllBucketNotification removes all configured bucket notifications on a bucket. +func (mc *Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error { + ctx, span := mc.startOtelSpan(ctx, "RemoveAllBucketNotification", bucketName) + defer span.End() + + err := mc.client.RemoveAllBucketNotification(ctx, bucketName) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return serializeErrorResponse(err) + } + + return nil +} + +// GetBucketVersioning gets the versioning configuration set on a bucket. 
+func (mc *Client) GetBucketVersioning(ctx context.Context, bucketName string) (*common.StorageBucketVersioningConfiguration, error) { + ctx, span := mc.startOtelSpan(ctx, "GetBucketVersioning", bucketName) + defer span.End() + + rawResult, err := mc.client.GetBucketVersioning(ctx, bucketName) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return nil, serializeErrorResponse(err) + } + + result := &common.StorageBucketVersioningConfiguration{ + ExcludedPrefixes: make([]string, len(rawResult.ExcludedPrefixes)), + ExcludeFolders: &rawResult.ExcludeFolders, + } + + if rawResult.Status != "" { + result.Status = &rawResult.Status + } + + if rawResult.MFADelete != "" { + result.MFADelete = &rawResult.MFADelete + } + + for i, prefix := range rawResult.ExcludedPrefixes { + result.ExcludedPrefixes[i] = prefix.Prefix + } + + return result, nil +} + +// SetBucketReplication sets replication configuration on a bucket. Role can be obtained by first defining the replication target +// to associate the source and destination buckets for replication with the replication endpoint. +func (mc *Client) SetBucketReplication(ctx context.Context, bucketName string, cfg common.StorageReplicationConfig) error { + ctx, span := mc.startOtelSpan(ctx, "SetBucketReplication", bucketName) + defer span.End() + + input := validateBucketReplicationConfig(cfg) + + err := mc.client.SetBucketReplication(ctx, bucketName, input) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return serializeErrorResponse(err) + } + + return nil +} + +// GetBucketReplication gets the current replication config on a bucket. +func (mc *Client) GetBucketReplication(ctx context.Context, bucketName string) (*common.StorageReplicationConfig, error) { + ctx, span := mc.startOtelSpan(ctx, "GetBucketReplication", bucketName) + defer span.End() + + result, err := mc.client.GetBucketReplication(ctx, bucketName) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return nil, serializeErrorResponse(err) + } + + return serializeBucketReplicationConfig(result), nil +}
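A minimal sketch of a replication configuration that the validate helpers below translate into minio's replication.Config (bucket names and the ARN are illustrative; MinIO and S3 typically address the destination by ARN):

	prefix := "logs/"
	cfg := common.StorageReplicationConfig{
		Rules: []common.StorageReplicationRule{
			{
				Status:      common.StorageReplicationRuleStatusEnabled,
				Priority:    1,
				Filter:      common.StorageReplicationFilter{Prefix: &prefix},
				Destination: &common.StorageReplicationDestination{Bucket: "arn:aws:s3:::replica-bucket"},
			},
		},
	}
	err := mc.SetBucketReplication(ctx, "source-bucket", cfg)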
 +// RemoveBucketReplication removes replication configuration on a bucket. +func (mc *Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { + ctx, span := mc.startOtelSpan(ctx, "RemoveBucketReplication", bucketName) + defer span.End() + + err := mc.client.RemoveBucketReplication(ctx, bucketName) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return serializeErrorResponse(err) + } + + return nil +} + +func validateBucketReplicationConfig(input common.StorageReplicationConfig) replication.Config { + result := replication.Config{ + Rules: make([]replication.Rule, len(input.Rules)), + } + + if input.Role != nil { + result.Role = *input.Role + } + + for i, item := range input.Rules { + result.Rules[i] = validateBucketReplicationRule(item) + } + + return result +} + +func validateBucketReplicationRule(item common.StorageReplicationRule) replication.Rule { + rule := replication.Rule{ + Status: replication.Status(item.Status), + Priority: item.Priority, + Filter: validateBucketReplicationFilter(item.Filter), + } + + if item.ID != nil { + rule.ID = *item.ID + } + + if item.DeleteMarkerReplication != nil && item.DeleteMarkerReplication.Status != "" { + rule.DeleteMarkerReplication.Status = replication.Status(item.DeleteMarkerReplication.Status) + } + + if item.DeleteReplication != nil && item.DeleteReplication.Status != "" { + rule.DeleteReplication.Status = replication.Status(item.DeleteReplication.Status) + } + + if item.ExistingObjectReplication != nil && item.ExistingObjectReplication.Status != "" { + rule.ExistingObjectReplication.Status = replication.Status(item.ExistingObjectReplication.Status) + } + + if item.SourceSelectionCriteria != nil && item.SourceSelectionCriteria.ReplicaModifications != nil && item.SourceSelectionCriteria.ReplicaModifications.Status != "" { + rule.SourceSelectionCriteria.ReplicaModifications.Status = replication.Status(item.SourceSelectionCriteria.ReplicaModifications.Status) + } + + if item.Destination != nil { + rule.Destination = replication.Destination{ + Bucket: item.Destination.Bucket, + } + + if item.Destination.StorageClass != nil { + rule.Destination.StorageClass = *item.Destination.StorageClass + } + } + + return rule +} + +func validateBucketReplicationFilter(input common.StorageReplicationFilter) replication.Filter { + result := replication.Filter{} + + if input.Prefix != nil { + result.Prefix = *input.Prefix + } + + if input.Tag != nil { + if input.Tag.Key != nil { + result.Tag.Key = *input.Tag.Key + } + + if input.Tag.Value != nil { + result.Tag.Value = *input.Tag.Value + } + } + + if input.And != nil { + if input.And.Prefix != nil { + result.And.Prefix = *input.And.Prefix + } + + result.And.Tags = make([]replication.Tag, len(input.And.Tags)) + + for i, tag := range input.And.Tags { + t := replication.Tag{} + if tag.Key != nil { + t.Key = *tag.Key + } + + if tag.Value != nil { + t.Value = *tag.Value + } + + result.And.Tags[i] = t + } + } + + return result +} + +func serializeBucketReplicationConfig(input replication.Config) *common.StorageReplicationConfig { + result := common.StorageReplicationConfig{ + Rules: make([]common.StorageReplicationRule, len(input.Rules)), + } + + if input.Role != "" { + result.Role = &input.Role + } + + for i, item := range input.Rules { + result.Rules[i] = serializeBucketReplicationRule(item) + } + + return &result +} + +func serializeBucketReplicationRule(item replication.Rule) common.StorageReplicationRule { + rule := common.StorageReplicationRule{ + Status: common.StorageReplicationRuleStatus(item.Status), + Priority: 
item.Priority, + } + + if item.ID != "" { + rule.ID = &item.ID + } + + // The common-side fields are pointers and must be allocated before assignment; + // the type names below mirror the minio replication package and are assumptions here. + if item.DeleteMarkerReplication.Status != "" { + rule.DeleteMarkerReplication = &common.DeleteMarkerReplication{Status: common.StorageReplicationRuleStatus(item.DeleteMarkerReplication.Status)} + } + + if item.DeleteReplication.Status != "" { + rule.DeleteReplication = &common.DeleteReplication{Status: common.StorageReplicationRuleStatus(item.DeleteReplication.Status)} + } + + if item.ExistingObjectReplication.Status != "" { + rule.ExistingObjectReplication = &common.ExistingObjectReplication{Status: common.StorageReplicationRuleStatus(item.ExistingObjectReplication.Status)} + } + + if item.SourceSelectionCriteria.ReplicaModifications.Status != "" { + rule.SourceSelectionCriteria = &common.SourceSelectionCriteria{ + ReplicaModifications: &common.ReplicaModifications{ + Status: common.StorageReplicationRuleStatus(item.SourceSelectionCriteria.ReplicaModifications.Status), + }, + } + } + + rule.Destination = &common.StorageReplicationDestination{ + Bucket: item.Destination.Bucket, + } + + if item.Destination.StorageClass != "" { + rule.Destination.StorageClass = &item.Destination.StorageClass + } + + if item.Filter.Prefix != "" { + rule.Filter.Prefix = &item.Filter.Prefix + } + + if item.Filter.Tag.Key != "" || item.Filter.Tag.Value != "" { + rule.Filter.Tag = &common.StorageTag{} + + if item.Filter.Tag.Key != "" { + rule.Filter.Tag.Key = &item.Filter.Tag.Key + } + + if item.Filter.Tag.Value != "" { + rule.Filter.Tag.Value = &item.Filter.Tag.Value + } + } + + if item.Filter.And.Prefix != "" || len(item.Filter.And.Tags) > 0 { + rule.Filter.And = &common.StorageReplicationFilterAnd{} + if item.Filter.And.Prefix != "" { + rule.Filter.And.Prefix = &item.Filter.And.Prefix + } + + rule.Filter.And.Tags = make([]common.StorageTag, len(item.Filter.And.Tags)) + + for i, tag := range item.Filter.And.Tags { + t := common.StorageTag{} + if tag.Key != "" { + t.Key = &tag.Key + } + + if tag.Value != "" { + t.Value = &tag.Value + } + + rule.Filter.And.Tags[i] = t + } + } + + return rule +} diff --git a/connector/storage/minio/client.go b/connector/storage/minio/client.go new file mode 100644 index 0000000..ad54297 --- /dev/null +++ b/connector/storage/minio/client.go @@ -0,0 +1,275 @@ +package minio + +import ( + "bytes" + "context" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + + "github.com/hasura/ndc-sdk-go/connector" + "github.com/hasura/ndc-sdk-go/utils" + "github.com/hasura/ndc-storage/connector/storage/common" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +var tracer = connector.NewTracer("connector/storage/minio") + +// AuthConfig represents the authentication config of the minio client. +type AuthConfig struct { + AccessKeyID string + SecretAccessKey string + SessionToken string + UseIAMAuth bool + IAMAuthEndpoint string +} + +// ClientConfig represents the configuration of the minio client. +type ClientConfig struct { + Type common.StorageProviderType + Endpoint string + Region string + PublicHost *url.URL + Port int + MaxRetries int + Secure bool + // TrailingHeaders indicates server support of trailing headers. + // Only supported for v4 signatures. + TrailingHeaders bool + + AuthConfig +}
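A minimal sketch of constructing the wrapper client below directly (the endpoint and credentials are illustrative placeholders):

	cfg := &ClientConfig{
		Type:     common.S3,
		Endpoint: "play.min.io",
		Port:     443,
		Secure:   true,
		AuthConfig: AuthConfig{
			AccessKeyID:     "minio-access-key",
			SecretAccessKey: "minio-secret-key",
		},
	}
	client, err := New(ctx, cfg, slog.Default())

In the connector itself this construction is driven by ClientConfig.toMinioConfig in connector/storage/config.go rather than called by hand.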
 +// Client represents a Minio client wrapper. +type Client struct { + publicHost *url.URL + providerType common.StorageProviderType + isDebug bool + client *minio.Client +} + +var _ common.StorageClient = &Client{} + +// New creates a new Minio client. +func New(ctx context.Context, cfg *ClientConfig, logger *slog.Logger) (*Client, error) { + mc := &Client{ + publicHost: cfg.PublicHost, + providerType: cfg.Type, + isDebug: utils.IsDebug(logger), + } + + transport, err := minio.DefaultTransport(cfg.Secure) + if err != nil { + return nil, err + } + + opts := &minio.Options{ + Secure: cfg.Secure, + Transport: transport, + Region: cfg.Region, + MaxRetries: cfg.MaxRetries, + TrailingHeaders: cfg.TrailingHeaders, + } + + if utils.IsDebug(logger) { + opts.Transport = debugRoundTripper{ + transport: transport, + propagator: otel.GetTextMapPropagator(), + port: cfg.Port, + logger: logger, + } + } else { + opts.Transport = roundTripper{ + transport: transport, + propagator: otel.GetTextMapPropagator(), + } + } + + if cfg.UseIAMAuth { + opts.Creds = credentials.NewIAM(cfg.IAMAuthEndpoint) + } else { + opts.Creds = credentials.NewStaticV4(cfg.AccessKeyID, cfg.SecretAccessKey, cfg.SessionToken) + } + + c, err := minio.New(cfg.Endpoint, opts) + if err != nil { + return nil, fmt.Errorf("failed to initialize the minio client: %w", err) + } + + mc.client = c + + return mc, nil +} + +type debugRoundTripper struct { + transport *http.Transport + propagator propagation.TextMapPropagator + port int + logger *slog.Logger +} + +func (mrt debugRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + ctx, span := tracer.Start(req.Context(), fmt.Sprintf("%s %s", req.Method, req.URL.Path), trace.WithSpanKind(trace.SpanKindClient)) + defer span.End() + + span.SetAttributes( + attribute.String("http.request.method", req.Method), + attribute.String("url.full", req.URL.String()), + attribute.String("server.address", req.URL.Hostname()), + attribute.Int("server.port", mrt.port), + attribute.String("network.protocol.name", "http"), + ) + + connector.SetSpanHeaderAttributes(span, "http.request.header.", req.Header) + + if req.ContentLength > 0 { + span.SetAttributes(attribute.Int64("http.request.body.size", req.ContentLength)) + } + + mrt.propagator.Inject(ctx, propagation.HeaderCarrier(req.Header)) + + requestLogAttrs := map[string]any{ + "url": req.URL.String(), + "method": req.Method, + "headers": connector.NewTelemetryHeaders(req.Header), + } + + if req.Body != nil && req.ContentLength > 0 && req.ContentLength <= 100*1024 { + rawBody, err := io.ReadAll(req.Body) + if err != nil { + return nil, err + } + + requestLogAttrs["body"] = string(rawBody) + + req.Body.Close() + req.Body = io.NopCloser(bytes.NewBuffer(rawBody)) + } + + logAttrs := []any{ + slog.String("type", "storage-client"), + slog.Any("request", requestLogAttrs), + } + + resp, err := mrt.transport.RoundTrip(req) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + slog.Debug("failed to execute the request: "+err.Error(), logAttrs...) 
+ + return resp, err + } + + span.SetAttributes(attribute.Int("http.response.status_code", resp.StatusCode)) + connector.SetSpanHeaderAttributes(span, "http.response.header.", resp.Header) + + if resp.ContentLength >= 0 { + span.SetAttributes(attribute.Int64("http.response.size", resp.ContentLength)) + } + + respLogAttrs := map[string]any{ + "http_status": resp.StatusCode, + "headers": resp.Header, + } + + if resp.Body != nil { + respBody, err := io.ReadAll(resp.Body) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + logAttrs = append(logAttrs, slog.Any("response", respLogAttrs)) + + slog.Debug("failed to read response body: "+err.Error(), logAttrs...) + resp.Body.Close() + + return resp, err + } + + respLogAttrs["body"] = string(respBody) + logAttrs = append(logAttrs, slog.Any("response", respLogAttrs)) + slog.Debug(resp.Status, logAttrs...) + + resp.Body.Close() + resp.Body = io.NopCloser(bytes.NewBuffer(respBody)) + + span.SetAttributes(attribute.Int("http.response.size", len(respBody))) + } + + if resp.StatusCode >= http.StatusBadRequest { + span.SetStatus(codes.Error, resp.Status) + } else { + slog.Debug("executed request successfully", logAttrs...) + } + + return resp, err +} + +type roundTripper struct { + transport *http.Transport + propagator propagation.TextMapPropagator +} + +func (rt roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + rt.propagator.Inject(req.Context(), propagation.HeaderCarrier(req.Header)) + + return rt.transport.RoundTrip(req) +} + +// EnableVersioning enables bucket versioning support. +func (mc *Client) EnableVersioning(ctx context.Context, bucketName string) error { + ctx, span := mc.startOtelSpan(ctx, "EnableVersioning", bucketName) + defer span.End() + + err := mc.client.EnableVersioning(ctx, bucketName) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return serializeErrorResponse(err) + } + + return nil +} + +// SuspendVersioning disables bucket versioning support. +func (mc *Client) SuspendVersioning(ctx context.Context, bucketName string) error { + ctx, span := mc.startOtelSpan(ctx, "SuspendVersioning", bucketName) + defer span.End() + + err := mc.client.SuspendVersioning(ctx, bucketName) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return serializeErrorResponse(err) + } + + return nil +} + +func (mc *Client) startOtelSpan(ctx context.Context, name string, bucketName string) (context.Context, trace.Span) { + spanKind := trace.SpanKindClient + if mc.isDebug { + spanKind = trace.SpanKindInternal + } + + ctx, span := tracer.Start(ctx, name, trace.WithSpanKind(spanKind)) + span.SetAttributes( + common.NewDBSystemAttribute(), + attribute.String("rpc.system", string(mc.providerType)), + ) + + if bucketName != "" { + span.SetAttributes(attribute.String("storage.bucket", bucketName)) + } + + return ctx, span +} diff --git a/connector/storage/minio/lifecycle.go b/connector/storage/minio/lifecycle.go new file mode 100644 index 0000000..dcf7924 --- /dev/null +++ b/connector/storage/minio/lifecycle.go @@ -0,0 +1,408 @@ +package minio + +import ( + "context" + + "github.com/hasura/ndc-sdk-go/scalar" + "github.com/hasura/ndc-storage/connector/storage/common" + "github.com/minio/minio-go/v7/pkg/lifecycle" + "go.opentelemetry.io/otel/codes" +) + +// SetBucketLifecycle sets lifecycle on bucket or an object prefix. 
+func (mc *Client) SetBucketLifecycle(ctx context.Context, bucketName string, config common.BucketLifecycleConfiguration) error { + ctx, span := mc.startOtelSpan(ctx, "SetBucketLifecycle", bucketName) + defer span.End() + + input := validateLifecycleConfiguration(config) + + err := mc.client.SetBucketLifecycle(ctx, bucketName, &input) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return serializeErrorResponse(err) + } + + return nil +} + +// GetBucketLifecycle gets lifecycle on a bucket or a prefix. +func (mc *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*common.BucketLifecycleConfiguration, error) { + ctx, span := mc.startOtelSpan(ctx, "GetBucketLifecycle", bucketName) + defer span.End() + + rawResult, err := mc.client.GetBucketLifecycle(ctx, bucketName) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + + return nil, serializeErrorResponse(err) + } + + result := serializeLifecycleConfiguration(*rawResult) + + return &result, nil +} + +func validateLifecycleRule(rule common.BucketLifecycleRule) lifecycle.Rule { + r := lifecycle.Rule{ + ID: rule.ID, + Expiration: validateLifecycleExpiration(rule.Expiration), + RuleFilter: validateLifecycleFilter(rule.RuleFilter), + Transition: validateLifecycleTransition(rule.Transition), + } + + if rule.AbortIncompleteMultipartUpload != nil && rule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { + r.AbortIncompleteMultipartUpload.DaysAfterInitiation = lifecycle.ExpirationDays(*rule.AbortIncompleteMultipartUpload.DaysAfterInitiation) + } + + if rule.AllVersionsExpiration != nil && (rule.AllVersionsExpiration.Days != nil || rule.AllVersionsExpiration.DeleteMarker != nil) { + if rule.AllVersionsExpiration.Days != nil { + r.AllVersionsExpiration.Days = *rule.AllVersionsExpiration.Days + } + + if rule.AllVersionsExpiration.DeleteMarker != nil { + r.AllVersionsExpiration.DeleteMarker = lifecycle.ExpireDeleteMarker(*rule.AllVersionsExpiration.DeleteMarker) + } + } + + if rule.DelMarkerExpiration != nil && rule.DelMarkerExpiration.Days != nil { + r.DelMarkerExpiration.Days = *rule.DelMarkerExpiration.Days + } + + if rule.NoncurrentVersionExpiration != nil { + if rule.NoncurrentVersionExpiration.NewerNoncurrentVersions != nil { + r.NoncurrentVersionExpiration.NewerNoncurrentVersions = *rule.NoncurrentVersionExpiration.NewerNoncurrentVersions + } + + if rule.NoncurrentVersionExpiration.NoncurrentDays != nil { + r.NoncurrentVersionExpiration.NoncurrentDays = lifecycle.ExpirationDays(*rule.NoncurrentVersionExpiration.NoncurrentDays) + } + } + + if rule.NoncurrentVersionTransition != nil { + if rule.NoncurrentVersionTransition.NewerNoncurrentVersions != nil { + r.NoncurrentVersionTransition.NewerNoncurrentVersions = *rule.NoncurrentVersionTransition.NewerNoncurrentVersions + } + + if rule.NoncurrentVersionTransition.NoncurrentDays != nil { + r.NoncurrentVersionTransition.NoncurrentDays = lifecycle.ExpirationDays(*rule.NoncurrentVersionTransition.NoncurrentDays) + } + + if rule.NoncurrentVersionTransition.StorageClass != nil { + r.NoncurrentVersionTransition.StorageClass = *rule.NoncurrentVersionTransition.StorageClass + } + } + + if rule.Prefix != nil { + r.Prefix = *rule.Prefix + } + + if rule.Status != nil { + r.Status = *rule.Status + } + + return r +}
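A minimal sketch of a lifecycle configuration that the helpers below translate into minio's lifecycle.Configuration (the bucket name, rule ID, and "tmp/" prefix are illustrative):

	days := 30
	prefix := "tmp/"
	status := "Enabled"
	cfg := common.BucketLifecycleConfiguration{
		Rules: []common.BucketLifecycleRule{
			{
				ID:         "expire-tmp",
				Prefix:     &prefix,
				Status:     &status,
				Expiration: &common.LifecycleExpiration{Days: &days},
			},
		},
	}
	err := mc.SetBucketLifecycle(ctx, "my-bucket", cfg)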
 +func validateLifecycleExpiration(input *common.LifecycleExpiration) lifecycle.Expiration { + result := lifecycle.Expiration{} + + if input == nil || input.IsEmpty() { + return result + } + + if input.Days != nil { + result.Days = lifecycle.ExpirationDays(*input.Days) + } + + if input.Date != nil { + result.Date = lifecycle.ExpirationDate(*input.Date) + } + + if input.DeleteMarker != nil { + result.DeleteMarker = lifecycle.ExpireDeleteMarker(*input.DeleteMarker) + } + + if input.DeleteAll != nil { + result.DeleteAll = lifecycle.ExpirationBoolean(*input.DeleteAll) + } + + return result +} + +func validateLifecycleTransition(input *common.LifecycleTransition) lifecycle.Transition { + result := lifecycle.Transition{} + + if input == nil { + return result + } + + if input.Days != nil { + result.Days = lifecycle.ExpirationDays(*input.Days) + } + + if input.Date != nil { + result.Date = lifecycle.ExpirationDate(*input.Date) + } + + if input.StorageClass != nil { + result.StorageClass = *input.StorageClass + } + + return result +} + +func validateLifecycleConfiguration(input common.BucketLifecycleConfiguration) lifecycle.Configuration { + result := lifecycle.Configuration{ + Rules: make([]lifecycle.Rule, len(input.Rules)), + } + + for i, rule := range input.Rules { + r := validateLifecycleRule(rule) + result.Rules[i] = r + } + + return result +} + +func validateLifecycleFilter(input *common.LifecycleFilter) lifecycle.Filter { + result := lifecycle.Filter{} + + if input == nil { + return result + } + + if input.Prefix != nil { + result.Prefix = *input.Prefix + } + + if input.ObjectSizeGreaterThan != nil { + result.ObjectSizeGreaterThan = *input.ObjectSizeGreaterThan + } + + if input.ObjectSizeLessThan != nil { + result.ObjectSizeLessThan = *input.ObjectSizeLessThan + } + + if input.Tag != nil { + if input.Tag.Key != nil { + result.Tag.Key = *input.Tag.Key + } + + if input.Tag.Value != nil { + result.Tag.Value = *input.Tag.Value + } + } + + if input.And != nil { + if input.And.Prefix != nil { + result.And.Prefix = *input.And.Prefix + } + + if input.And.ObjectSizeGreaterThan != nil { + result.And.ObjectSizeGreaterThan = *input.And.ObjectSizeGreaterThan + } + + if input.And.ObjectSizeLessThan != nil { + result.And.ObjectSizeLessThan = *input.And.ObjectSizeLessThan + } + + result.And.Tags = make([]lifecycle.Tag, len(input.And.Tags)) + + for i, t := range input.And.Tags { + tag := lifecycle.Tag{} + + if t.Key != nil { + tag.Key = *t.Key + } + + if t.Value != nil { + tag.Value = *t.Value + } + + result.And.Tags[i] = tag + } + } + + return result +} + +func serializeLifecycleRule(rule lifecycle.Rule) common.BucketLifecycleRule { + r := common.BucketLifecycleRule{ + ID: rule.ID, + RuleFilter: serializeLifecycleFilter(rule.RuleFilter), + Transition: serializeLifecycleTransition(rule.Transition), + } + + if !rule.AbortIncompleteMultipartUpload.IsDaysNull() { + days := int(rule.AbortIncompleteMultipartUpload.DaysAfterInitiation) + r.AbortIncompleteMultipartUpload = &common.AbortIncompleteMultipartUpload{ + DaysAfterInitiation: &days, + } + } + + if !rule.AllVersionsExpiration.IsNull() { + deleteMarker := bool(rule.AllVersionsExpiration.DeleteMarker) + r.AllVersionsExpiration = &common.LifecycleAllVersionsExpiration{ + Days: &rule.AllVersionsExpiration.Days, + DeleteMarker: &deleteMarker, + } + } + + if !rule.DelMarkerExpiration.IsNull() { + // allocate the pointer destination before assigning; the LifecycleDelMarkerExpiration + // type name follows the package's Lifecycle* naming convention and is an assumption here + r.DelMarkerExpiration = &common.LifecycleDelMarkerExpiration{Days: &rule.DelMarkerExpiration.Days} + } + + if !rule.Expiration.IsNull() { + r.Expiration = &common.LifecycleExpiration{ + DeleteMarker: (*bool)(&rule.Expiration.DeleteMarker), + } + + if rule.Expiration.Days != 0 { + r.Expiration.Days = (*int)(&rule.Expiration.Days) + } + + if !rule.Expiration.Date.IsZero() { + r.Expiration.Date = &scalar.Date{Time: 
+
+func serializeLifecycleRule(rule lifecycle.Rule) common.BucketLifecycleRule {
+    r := common.BucketLifecycleRule{
+        ID:         rule.ID,
+        RuleFilter: serializeLifecycleFilter(rule.RuleFilter),
+        Transition: serializeLifecycleTransition(rule.Transition),
+    }
+
+    if !rule.AbortIncompleteMultipartUpload.IsDaysNull() {
+        days := int(rule.AbortIncompleteMultipartUpload.DaysAfterInitiation)
+        r.AbortIncompleteMultipartUpload = &common.AbortIncompleteMultipartUpload{
+            DaysAfterInitiation: &days,
+        }
+    }
+
+    if !rule.AllVersionsExpiration.IsNull() {
+        deleteMarker := bool(rule.AllVersionsExpiration.DeleteMarker)
+        r.AllVersionsExpiration = &common.LifecycleAllVersionsExpiration{
+            Days:         &rule.AllVersionsExpiration.Days,
+            DeleteMarker: &deleteMarker,
+        }
+    }
+
+    if !rule.DelMarkerExpiration.IsNull() {
+        // allocate the wrapper before writing to it; assumes the common mirror
+        // type is named LifecycleDelMarkerExpiration
+        r.DelMarkerExpiration = &common.LifecycleDelMarkerExpiration{
+            Days: &rule.DelMarkerExpiration.Days,
+        }
+    }
+
+    if !rule.Expiration.IsNull() {
+        r.Expiration = &common.LifecycleExpiration{
+            DeleteMarker: (*bool)(&rule.Expiration.DeleteMarker),
+        }
+
+        if rule.Expiration.Days != 0 {
+            r.Expiration.Days = (*int)(&rule.Expiration.Days)
+        }
+
+        if !rule.Expiration.Date.IsZero() {
+            r.Expiration.Date = &scalar.Date{Time: rule.Expiration.Date.Time}
+        }
+    }
+
+    if !rule.NoncurrentVersionExpiration.IsDaysNull() || rule.NoncurrentVersionExpiration.NewerNoncurrentVersions != 0 {
+        r.NoncurrentVersionExpiration = &common.LifecycleNoncurrentVersionExpiration{}
+
+        if rule.NoncurrentVersionExpiration.NewerNoncurrentVersions != 0 {
+            r.NoncurrentVersionExpiration.NewerNoncurrentVersions = &rule.NoncurrentVersionExpiration.NewerNoncurrentVersions
+        }
+
+        if !rule.NoncurrentVersionExpiration.IsDaysNull() {
+            days := int(rule.NoncurrentVersionExpiration.NoncurrentDays)
+            r.NoncurrentVersionExpiration.NoncurrentDays = &days
+        }
+    }
+
+    if !rule.NoncurrentVersionTransition.IsDaysNull() || rule.NoncurrentVersionTransition.NewerNoncurrentVersions != 0 || rule.NoncurrentVersionTransition.StorageClass != "" {
+        // allocate the wrapper before writing to it; assumes the common mirror
+        // type is named LifecycleNoncurrentVersionTransition
+        r.NoncurrentVersionTransition = &common.LifecycleNoncurrentVersionTransition{}
+
+        if rule.NoncurrentVersionTransition.NewerNoncurrentVersions != 0 {
+            r.NoncurrentVersionTransition.NewerNoncurrentVersions = &rule.NoncurrentVersionTransition.NewerNoncurrentVersions
+        }
+
+        if rule.NoncurrentVersionTransition.NoncurrentDays != 0 {
+            days := int(rule.NoncurrentVersionTransition.NoncurrentDays)
+            r.NoncurrentVersionTransition.NoncurrentDays = &days
+        }
+
+        if rule.NoncurrentVersionTransition.StorageClass != "" {
+            r.NoncurrentVersionTransition.StorageClass = &rule.NoncurrentVersionTransition.StorageClass
+        }
+    }
+
+    if rule.Prefix != "" {
+        r.Prefix = &rule.Prefix
+    }
+
+    if rule.Status != "" {
+        r.Status = &rule.Status
+    }
+
+    return r
+}
+
+func serializeLifecycleTransition(input lifecycle.Transition) *common.LifecycleTransition {
+    if input.IsNull() {
+        return nil
+    }
+
+    result := common.LifecycleTransition{}
+
+    if input.Days != 0 {
+        result.Days = (*int)(&input.Days)
+    }
+
+    if !input.Date.IsZero() {
+        result.Date = &scalar.Date{Time: input.Date.Time}
+    }
+
+    if input.StorageClass != "" {
+        result.StorageClass = &input.StorageClass
+    }
+
+    return &result
+}
+
+func serializeLifecycleConfiguration(input lifecycle.Configuration) common.BucketLifecycleConfiguration {
+    result := common.BucketLifecycleConfiguration{
+        Rules: make([]common.BucketLifecycleRule, len(input.Rules)),
+    }
+
+    for i, rule := range input.Rules {
+        result.Rules[i] = serializeLifecycleRule(rule)
+    }
+
+    return result
+}
+
+func serializeLifecycleFilter(input lifecycle.Filter) *common.LifecycleFilter {
+    result := common.LifecycleFilter{}
+
+    if input.Prefix != "" {
+        result.Prefix = &input.Prefix
+    }
+
+    if input.ObjectSizeGreaterThan != 0 {
+        result.ObjectSizeGreaterThan = &input.ObjectSizeGreaterThan
+    }
+
+    if input.ObjectSizeLessThan != 0 {
+        result.ObjectSizeLessThan = &input.ObjectSizeLessThan
+    }
+
+    if input.Tag.Key != "" || input.Tag.Value != "" {
+        // allocate the tag wrapper before writing to it
+        result.Tag = &common.StorageTag{}
+
+        if input.Tag.Key != "" {
+            result.Tag.Key = &input.Tag.Key
+        }
+
+        if input.Tag.Value != "" {
+            result.Tag.Value = &input.Tag.Value
+        }
+    }
+
+    if !input.And.IsEmpty() {
+        result.And = &common.LifecycleFilterAnd{}
+
+        if input.And.Prefix != "" {
+            result.And.Prefix = &input.And.Prefix
+        }
+
+        if input.And.ObjectSizeGreaterThan != 0 {
+            result.And.ObjectSizeGreaterThan = &input.And.ObjectSizeGreaterThan
+        }
+
+        if input.And.ObjectSizeLessThan != 0 {
+            result.And.ObjectSizeLessThan = &input.And.ObjectSizeLessThan
+        }
+
+        result.And.Tags = make([]common.StorageTag, 0, len(input.And.Tags))
+
+        for _, t := range input.And.Tags {
+            if t.IsEmpty() {
+                continue
+            }
+
+            tag := common.StorageTag{}
+
+            if t.Key != "" {
+                tag.Key = &t.Key
+            }
+
+            if t.Value != "" {
+                tag.Value = &t.Value
+            }
+
+            result.And.Tags = append(result.And.Tags, tag)
+        }
+    }
+
+    return &result
+}
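For orientation, a minimal usage sketch of the mapping above, assuming a `*Client` from this package; the bucket name and the 30-day rule are illustrative only, not part of the patch:

func exampleSetLifecycle(ctx context.Context, mc *Client) error {
    days := 30
    prefix := "tmp/"
    status := "Enabled"

    config := common.BucketLifecycleConfiguration{
        Rules: []common.BucketLifecycleRule{
            {
                ID:         "expire-tmp",
                Status:     &status,
                Prefix:     &prefix,
                Expiration: &common.LifecycleExpiration{Days: &days},
            },
        },
    }

    // validateLifecycleConfiguration converts these rules into minio's
    // lifecycle.Configuration before the SetBucketLifecycle API call.
    return mc.SetBucketLifecycle(ctx, "my-bucket", config)
}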
diff --git a/connector/storage/minio/object.go b/connector/storage/minio/object.go
new file mode 100644
index 0000000..4db85d9
--- /dev/null
+++ b/connector/storage/minio/object.go
@@ -0,0 +1,813 @@
+package minio
+
+import (
+    "context"
+    "fmt"
+    "io"
+    "net/http"
+    "net/url"
+    "path/filepath"
+    "time"
+
+    "github.com/hasura/ndc-sdk-go/schema"
+    "github.com/hasura/ndc-storage/connector/storage/common"
+    "github.com/minio/minio-go/v7"
+    "github.com/minio/minio-go/v7/pkg/tags"
+    "go.opentelemetry.io/otel/attribute"
+    "go.opentelemetry.io/otel/codes"
+)
+
+// ListObjects lists objects in a bucket.
+func (mc *Client) ListObjects(ctx context.Context, opts *common.ListStorageObjectsOptions) ([]common.StorageObject, error) {
+    ctx, span := mc.startOtelSpan(ctx, "ListObjects", opts.Bucket)
+    defer span.End()
+
+    objChan := mc.client.ListObjects(ctx, opts.Bucket, serializeListObjectsOptions(span, opts))
+    objects := make([]common.StorageObject, 0)
+
+    for obj := range objChan {
+        if obj.Err != nil {
+            span.SetStatus(codes.Error, obj.Err.Error())
+            span.RecordError(obj.Err)
+
+            return nil, serializeErrorResponse(obj.Err)
+        }
+
+        object := serializeObjectInfo(obj)
+        object.Bucket = opts.Bucket
+        objects = append(objects, object)
+    }
+
+    span.SetAttributes(attribute.Int("storage.object_count", len(objects)))
+
+    return objects, nil
+}
+
+// ListIncompleteUploads lists partially uploaded objects in a bucket.
+func (mc *Client) ListIncompleteUploads(ctx context.Context, args *common.ListIncompleteUploadsArguments) ([]common.StorageObjectMultipartInfo, error) {
+    ctx, span := mc.startOtelSpan(ctx, "ListIncompleteUploads", args.Bucket)
+    defer span.End()
+
+    span.SetAttributes(
+        attribute.String("storage.object_prefix", args.Prefix),
+        attribute.Bool("storage.options.recursive", args.Recursive),
+    )
+
+    objChan := mc.client.ListIncompleteUploads(ctx, args.Bucket, args.Prefix, args.Recursive)
+    objects := make([]common.StorageObjectMultipartInfo, 0)
+
+    for obj := range objChan {
+        if obj.Err != nil {
+            span.SetStatus(codes.Error, obj.Err.Error())
+            span.RecordError(obj.Err)
+
+            return nil, serializeErrorResponse(obj.Err)
+        }
+
+        object := common.StorageObjectMultipartInfo{
+            Key:          obj.Key,
+            Initiated:    &obj.Initiated,
+            StorageClass: obj.StorageClass,
+            Size:         obj.Size,
+            UploadID:     obj.UploadID,
+        }
+
+        objects = append(objects, object)
+    }
+
+    span.SetAttributes(attribute.Int("storage.object_count", len(objects)))
+
+    return objects, nil
+}
+
+// RemoveIncompleteUpload removes a partially uploaded object.
+func (mc *Client) RemoveIncompleteUpload(ctx context.Context, args *common.RemoveIncompleteUploadArguments) error {
+    ctx, span := mc.startOtelSpan(ctx, "RemoveIncompleteUpload", args.Bucket)
+    defer span.End()
+
+    span.SetAttributes(attribute.String("storage.key", args.Object))
+
+    err := mc.client.RemoveIncompleteUpload(ctx, args.Bucket, args.Object)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return serializeErrorResponse(err)
+    }
+
+    return nil
+}
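A short usage sketch for the listing API above; the bucket and prefix are placeholders. Note that per-object errors arrive on the channel, so the first failure aborts the listing:

func exampleListObjects(ctx context.Context, mc *Client) error {
    objects, err := mc.ListObjects(ctx, &common.ListStorageObjectsOptions{
        Bucket:    "my-bucket",
        Prefix:    "invoices/2024/",
        Recursive: true,
    })
    if err != nil {
        return err
    }

    for _, obj := range objects {
        fmt.Printf("%s (%d bytes)\n", obj.Name, obj.Size)
    }

    return nil
}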
+// GetObject returns a stream of the object data. Most of the common errors occur when reading the stream.
+func (mc *Client) GetObject(ctx context.Context, opts *common.GetStorageObjectOptions) (io.ReadCloser, error) {
+    ctx, span := mc.startOtelSpan(ctx, "GetObject", opts.Bucket)
+    defer span.End()
+
+    span.SetAttributes(attribute.String("storage.key", opts.Object))
+    options := serializeGetObjectOptions(span, opts)
+
+    object, err := mc.client.GetObject(ctx, opts.Bucket, opts.Object, options)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return nil, serializeErrorResponse(err)
+    }
+
+    return object, nil
+}
+
+// PutObject uploads objects that are less than 128MiB in a single PUT operation. For objects that are greater than 128MiB in size,
+// PutObject seamlessly uploads the object as parts of 128MiB or more depending on the actual file size. The max upload size for an object is 5TB.
+func (mc *Client) PutObject(ctx context.Context, args *common.PutStorageObjectArguments, reader io.Reader, objectSize int64) (*common.StorageUploadInfo, error) {
+    ctx, span := mc.startOtelSpan(ctx, "PutObject", args.Bucket)
+    defer span.End()
+
+    span.SetAttributes(
+        attribute.String("storage.key", args.Object),
+        attribute.Int64("http.request.body.size", objectSize),
+    )
+
+    options := minio.PutObjectOptions{
+        UserMetadata:            args.Options.UserMetadata,
+        UserTags:                args.Options.UserTags,
+        ContentType:             args.Options.ContentType,
+        ContentEncoding:         args.Options.ContentEncoding,
+        ContentDisposition:      args.Options.ContentDisposition,
+        ContentLanguage:         args.Options.ContentLanguage,
+        CacheControl:            args.Options.CacheControl,
+        NumThreads:              args.Options.NumThreads,
+        StorageClass:            args.Options.StorageClass,
+        PartSize:                args.Options.PartSize,
+        SendContentMd5:          args.Options.SendContentMd5,
+        DisableContentSha256:    args.Options.DisableContentSha256,
+        DisableMultipart:        args.Options.DisableMultipart,
+        WebsiteRedirectLocation: args.Options.WebsiteRedirectLocation,
+        ConcurrentStreamParts:   args.Options.ConcurrentStreamParts,
+    }
+
+    if args.Options.Expires != nil {
+        options.Expires = *args.Options.Expires
+    }
+
+    if args.Options.RetainUntilDate != nil {
+        options.RetainUntilDate = *args.Options.RetainUntilDate
+        span.SetAttributes(attribute.String("storage.options.retain_until_date", args.Options.RetainUntilDate.Format(time.RFC3339)))
+    }
+
+    if args.Options.Mode != nil {
+        mode := minio.RetentionMode(string(*args.Options.Mode))
+        if !mode.IsValid() {
+            errorMsg := fmt.Sprintf("invalid RetentionMode: %s", *args.Options.Mode)
+            span.SetStatus(codes.Error, errorMsg)
+
+            return nil, schema.UnprocessableContentError(errorMsg, nil)
+        }
+
+        options.Mode = mode
+    }
+
+    if args.Options.LegalHold != nil {
+        legalHold := minio.LegalHoldStatus(*args.Options.LegalHold)
+        if !legalHold.IsValid() {
+            errorMsg := fmt.Sprintf("invalid LegalHoldStatus: %s", *args.Options.LegalHold)
+            span.SetStatus(codes.Error, errorMsg)
+
+            return nil, schema.UnprocessableContentError(errorMsg, nil)
+        }
+
+        options.LegalHold = legalHold
+    }
+
+    if args.Options.Checksum != nil {
+        options.Checksum = parseChecksumType(*args.Options.Checksum)
+    }
+
+    if args.Options.AutoChecksum != nil {
+        options.AutoChecksum = parseChecksumType(*args.Options.AutoChecksum)
+    }
+
+    object, err := mc.client.PutObject(ctx, args.Bucket, args.Object, reader, objectSize, options)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return nil, serializeErrorResponse(err)
+    }
+
+    result := serializeUploadObjectInfo(object)
+    common.SetUploadInfoAttributes(span, &result)
+
+    return &result, nil
+}
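A sketch of a small single-shot upload through the method above, assuming a `bytes` import; all names and values are illustrative. For payloads past the configured part size, minio-go transparently switches to multipart upload:

func examplePutObject(ctx context.Context, mc *Client) (*common.StorageUploadInfo, error) {
    payload := []byte(`{"hello":"world"}`)

    args := &common.PutStorageObjectArguments{
        Bucket: "my-bucket",
        Object: "data/hello.json",
    }
    args.Options.ContentType = "application/json"

    return mc.PutObject(ctx, args, bytes.NewReader(payload), int64(len(payload)))
}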
+
+// CopyObject creates or replaces an object through server-side copying of an existing object.
+// It supports conditional copying, copying a part of an object and server-side encryption of destination and decryption of source.
+// To copy multiple source objects into a single destination object see the ComposeObject API.
+func (mc *Client) CopyObject(ctx context.Context, dest common.StorageCopyDestOptions, src common.StorageCopySrcOptions) (*common.StorageUploadInfo, error) {
+    ctx, span := mc.startOtelSpan(ctx, "CopyObject", dest.Bucket)
+    defer span.End()
+
+    span.SetAttributes(
+        attribute.String("storage.key", dest.Object),
+        attribute.String("storage.copy_source", src.Object),
+    )
+
+    destOptions, err := convertCopyDestOptions(dest)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return nil, schema.UnprocessableContentError(err.Error(), nil)
+    }
+
+    srcOptions := serializeCopySourceOptions(src)
+
+    object, err := mc.client.CopyObject(ctx, *destOptions, srcOptions)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return nil, serializeErrorResponse(err)
+    }
+
+    result := serializeUploadObjectInfo(object)
+    common.SetUploadInfoAttributes(span, &result)
+
+    return &result, nil
+}
+
+// ComposeObject creates an object by concatenating a list of source objects using server-side copying.
+func (mc *Client) ComposeObject(ctx context.Context, dest common.StorageCopyDestOptions, sources []common.StorageCopySrcOptions) (*common.StorageUploadInfo, error) {
+    ctx, span := mc.startOtelSpan(ctx, "ComposeObject", dest.Bucket)
+    defer span.End()
+
+    span.SetAttributes(attribute.String("storage.key", dest.Object))
+
+    srcKeys := make([]string, len(sources))
+    srcOptions := make([]minio.CopySrcOptions, len(sources))
+
+    for i, src := range sources {
+        srcKeys[i] = src.Object
+        srcOptions[i] = serializeCopySourceOptions(src)
+    }
+
+    span.SetAttributes(attribute.StringSlice("storage.copy_sources", srcKeys))
+
+    destOptions, err := convertCopyDestOptions(dest)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return nil, schema.UnprocessableContentError(err.Error(), nil)
+    }
+
+    object, err := mc.client.ComposeObject(ctx, *destOptions, srcOptions...)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return nil, serializeErrorResponse(err)
+    }
+
+    result := serializeUploadObjectInfo(object)
+    common.SetUploadInfoAttributes(span, &result)
+
+    return &result, nil
+}
+
+// StatObject fetches metadata of an object.
+func (mc *Client) StatObject(ctx context.Context, opts *common.GetStorageObjectOptions) (*common.StorageObject, error) {
+    ctx, span := mc.startOtelSpan(ctx, "StatObject", opts.Bucket)
+    defer span.End()
+
+    span.SetAttributes(attribute.String("storage.key", opts.Object))
+    options := serializeGetObjectOptions(span, opts)
+
+    object, err := mc.client.StatObject(ctx, opts.Bucket, opts.Object, options)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return nil, serializeErrorResponse(err)
+    }
+
+    result := serializeObjectInfo(object)
+    result.Bucket = opts.Bucket
+    common.SetObjectInfoSpanAttributes(span, &result)
+
+    return &result, nil
+}
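A sketch of a server-side copy using the method above; no object data flows through the connector process, and the bucket and keys are placeholders:

func exampleCopyObject(ctx context.Context, mc *Client) (*common.StorageUploadInfo, error) {
    dest := common.StorageCopyDestOptions{Bucket: "my-bucket", Object: "backup/report.csv"}
    src := common.StorageCopySrcOptions{Bucket: "my-bucket", Object: "reports/report.csv"}

    return mc.CopyObject(ctx, dest, src)
}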
+
+// RemoveObject removes an object with some specified options.
+func (mc *Client) RemoveObject(ctx context.Context, opts *common.RemoveStorageObjectOptions) error {
+    ctx, span := mc.startOtelSpan(ctx, "RemoveObject", opts.Bucket)
+    defer span.End()
+
+    span.SetAttributes(
+        attribute.String("storage.key", opts.Object),
+        attribute.Bool("storage.options.force_delete", opts.ForceDelete),
+        attribute.Bool("storage.options.governance_bypass", opts.GovernanceBypass),
+    )
+
+    if opts.VersionID != "" {
+        span.SetAttributes(attribute.String("storage.options.version", opts.VersionID))
+    }
+
+    options := minio.RemoveObjectOptions{
+        ForceDelete:      opts.ForceDelete,
+        GovernanceBypass: opts.GovernanceBypass,
+        VersionID:        opts.VersionID,
+    }
+
+    err := mc.client.RemoveObject(ctx, opts.Bucket, opts.Object, options)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return serializeErrorResponse(err)
+    }
+
+    return nil
+}
+
+// RemoveObjects removes a list of objects obtained from an input channel. The call sends a delete request to the server up to 1000 objects at a time.
+// The errors observed are sent over the error channel.
+func (mc *Client) RemoveObjects(ctx context.Context, opts *common.RemoveStorageObjectsOptions) []common.RemoveStorageObjectError {
+    ctx, span := mc.startOtelSpan(ctx, "RemoveObjects", opts.Bucket)
+    defer span.End()
+
+    listOptions := serializeListObjectsOptions(span, &opts.ListStorageObjectsOptions)
+    span.SetAttributes(attribute.Bool("storage.options.governance_bypass", opts.GovernanceBypass))
+
+    objectChan := mc.client.ListObjects(ctx, opts.Bucket, listOptions)
+
+    options := minio.RemoveObjectsOptions{
+        GovernanceBypass: opts.GovernanceBypass,
+    }
+
+    errChan := mc.client.RemoveObjects(ctx, opts.Bucket, objectChan, options)
+    errs := make([]common.RemoveStorageObjectError, 0)
+
+    for err := range errChan {
+        errs = append(errs, common.RemoveStorageObjectError{
+            ObjectName: err.ObjectName,
+            VersionID:  err.VersionID,
+            Error:      err.Err,
+        })
+    }
+
+    return errs
+}
+
+// PutObjectRetention applies object retention lock onto an object.
+func (mc *Client) PutObjectRetention(ctx context.Context, opts *common.PutStorageObjectRetentionOptions) error {
+    ctx, span := mc.startOtelSpan(ctx, "PutObjectRetention", opts.Bucket)
+    defer span.End()
+
+    span.SetAttributes(
+        attribute.String("storage.key", opts.Object),
+        attribute.Bool("storage.options.governance_bypass", opts.GovernanceBypass),
+    )
+
+    if opts.VersionID != "" {
+        span.SetAttributes(attribute.String("storage.options.version", opts.VersionID))
+    }
+
+    if opts.RetainUntilDate != nil {
+        span.SetAttributes(attribute.String("storage.options.retain_until_date", opts.RetainUntilDate.Format(time.RFC3339)))
+    }
+
+    options := minio.PutObjectRetentionOptions{
+        GovernanceBypass: opts.GovernanceBypass,
+        VersionID:        opts.VersionID,
+        RetainUntilDate:  opts.RetainUntilDate,
+    }
+
+    if opts.Mode != nil {
+        mode := minio.RetentionMode(string(*opts.Mode))
+        if !mode.IsValid() {
+            errorMsg := fmt.Sprintf("invalid RetentionMode: %s", *opts.Mode)
+            span.SetStatus(codes.Error, errorMsg)
+
+            return schema.UnprocessableContentError(errorMsg, nil)
+        }
+
+        options.Mode = &mode
+    }
+
+    err := mc.client.PutObjectRetention(ctx, opts.Bucket, opts.Object, options)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return serializeErrorResponse(err)
+    }
+
+    return nil
+}
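A sketch of applying a retention lock with the method above; the COMPLIANCE mode and 7-day window are illustrative, and the target bucket must have been created with object locking enabled:

func exampleRetention(ctx context.Context, mc *Client) error {
    mode := common.StorageRetentionMode("COMPLIANCE")
    until := time.Now().AddDate(0, 0, 7) // keep for 7 days

    return mc.PutObjectRetention(ctx, &common.PutStorageObjectRetentionOptions{
        Bucket:          "minio-bucket-lock",
        Object:          "contracts/a.pdf",
        Mode:            &mode,
        RetainUntilDate: &until,
    })
}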
+
+// PutObjectLegalHold applies legal-hold onto an object.
+func (mc *Client) PutObjectLegalHold(ctx context.Context, opts *common.PutStorageObjectLegalHoldOptions) error {
+    ctx, span := mc.startOtelSpan(ctx, "PutObjectLegalHold", opts.Bucket)
+    defer span.End()
+
+    span.SetAttributes(attribute.String("storage.key", opts.Object))
+
+    options := minio.PutObjectLegalHoldOptions{
+        VersionID: opts.VersionID,
+    }
+
+    if opts.VersionID != "" {
+        span.SetAttributes(attribute.String("storage.options.version", opts.VersionID))
+    }
+
+    if opts.Status != nil {
+        span.SetAttributes(attribute.String("storage.options.status", string(*opts.Status)))
+
+        legalHold := minio.LegalHoldStatus(*opts.Status)
+        if !legalHold.IsValid() {
+            errorMsg := "invalid LegalHoldStatus: " + string(*opts.Status)
+            span.SetStatus(codes.Error, errorMsg)
+
+            return schema.UnprocessableContentError(errorMsg, nil)
+        }
+
+        options.Status = &legalHold
+    }
+
+    err := mc.client.PutObjectLegalHold(ctx, opts.Bucket, opts.Object, options)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return serializeErrorResponse(err)
+    }
+
+    return nil
+}
+
+// GetObjectLegalHold returns legal-hold status on a given object.
+func (mc *Client) GetObjectLegalHold(ctx context.Context, opts *common.GetStorageObjectLegalHoldOptions) (common.StorageLegalHoldStatus, error) {
+    ctx, span := mc.startOtelSpan(ctx, "GetObjectLegalHold", opts.Bucket)
+    defer span.End()
+
+    span.SetAttributes(attribute.String("storage.key", opts.Object))
+
+    options := minio.GetObjectLegalHoldOptions{
+        VersionID: opts.VersionID,
+    }
+
+    if opts.VersionID != "" {
+        span.SetAttributes(attribute.String("storage.options.version", opts.VersionID))
+    }
+
+    status, err := mc.client.GetObjectLegalHold(ctx, opts.Bucket, opts.Object, options)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return "", serializeErrorResponse(err)
+    }
+
+    if status == nil {
+        return "", nil
+    }
+
+    result := common.StorageLegalHoldStatus(string(*status))
+
+    return result, nil
+}
+
+// PutObjectTagging sets new object tags on the given object, replacing or overwriting any existing tags.
+func (mc *Client) PutObjectTagging(ctx context.Context, opts *common.PutStorageObjectTaggingOptions) error {
+    ctx, span := mc.startOtelSpan(ctx, "PutObjectTagging", opts.Bucket)
+    defer span.End()
+
+    span.SetAttributes(attribute.String("storage.key", opts.Object))
+
+    options := minio.PutObjectTaggingOptions{
+        VersionID: opts.VersionID,
+    }
+
+    if opts.VersionID != "" {
+        span.SetAttributes(attribute.String("storage.options.version", opts.VersionID))
+    }
+
+    inputTags, err := tags.NewTags(opts.Tags, false)
+    if err != nil {
+        span.SetStatus(codes.Error, "failed to convert minio tags")
+        span.RecordError(err)
+
+        return schema.UnprocessableContentError(err.Error(), nil)
+    }
+
+    err = mc.client.PutObjectTagging(ctx, opts.Bucket, opts.Object, inputTags, options)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return serializeErrorResponse(err)
+    }
+
+    return nil
+}
+
+// GetObjectTagging fetches object tags from the given object.
+func (mc *Client) GetObjectTagging(ctx context.Context, opts *common.StorageObjectTaggingOptions) (map[string]string, error) {
+    ctx, span := mc.startOtelSpan(ctx, "GetObjectTagging", opts.Bucket)
+    defer span.End()
+
+    span.SetAttributes(attribute.String("storage.key", opts.Object))
+
+    options := minio.GetObjectTaggingOptions{
+        VersionID: opts.VersionID,
+    }
+
+    if opts.VersionID != "" {
+        span.SetAttributes(attribute.String("storage.options.version", opts.VersionID))
+    }
+
+    results, err := mc.client.GetObjectTagging(ctx, opts.Bucket, opts.Object, options)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return nil, serializeErrorResponse(err)
+    }
+
+    return results.ToMap(), nil
+}
+
+// RemoveObjectTagging removes object tags from the given object.
+func (mc *Client) RemoveObjectTagging(ctx context.Context, opts *common.StorageObjectTaggingOptions) error {
+    ctx, span := mc.startOtelSpan(ctx, "RemoveObjectTagging", opts.Bucket)
+    defer span.End()
+
+    span.SetAttributes(attribute.String("storage.key", opts.Object))
+
+    options := minio.RemoveObjectTaggingOptions{
+        VersionID: opts.VersionID,
+    }
+
+    if opts.VersionID != "" {
+        span.SetAttributes(attribute.String("storage.options.version", opts.VersionID))
+    }
+
+    err := mc.client.RemoveObjectTagging(ctx, opts.Bucket, opts.Object, options)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return serializeErrorResponse(err)
+    }
+
+    return nil
+}
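A sketch of a tag round-trip with the tagging methods above (bucket and keys illustrative); note that PutObjectTagging replaces the whole tag set rather than merging:

func exampleTagging(ctx context.Context, mc *Client) (map[string]string, error) {
    err := mc.PutObjectTagging(ctx, &common.PutStorageObjectTaggingOptions{
        Bucket: "my-bucket",
        Object: "reports/report.csv",
        Tags:   map[string]string{"team": "finance", "tier": "hot"},
    })
    if err != nil {
        return nil, err
    }

    return mc.GetObjectTagging(ctx, &common.StorageObjectTaggingOptions{
        Bucket: "my-bucket",
        Object: "reports/report.csv",
    })
}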
+
+// GetObjectAttributes fetches metadata and additional attributes (checksums, object parts) of an object.
+func (mc *Client) GetObjectAttributes(ctx context.Context, opts *common.StorageObjectAttributesOptions) (*common.StorageObjectAttributes, error) {
+    ctx, span := mc.startOtelSpan(ctx, "GetObjectAttributes", opts.Bucket)
+    defer span.End()
+
+    span.SetAttributes(attribute.String("storage.key", opts.Object))
+
+    options := minio.ObjectAttributesOptions{
+        VersionID:        opts.VersionID,
+        MaxParts:         opts.MaxParts,
+        PartNumberMarker: opts.PartNumberMarker,
+    }
+
+    if opts.VersionID != "" {
+        span.SetAttributes(attribute.String("storage.options.version", opts.VersionID))
+    }
+
+    attrs, err := mc.client.GetObjectAttributes(ctx, opts.Bucket, opts.Object, options)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return nil, serializeErrorResponse(err)
+    }
+
+    checksum := common.StorageObjectChecksum{}
+    if !isStringNull(attrs.Checksum.ChecksumCRC32) {
+        checksum.ChecksumCRC32 = &attrs.Checksum.ChecksumCRC32
+    }
+
+    if !isStringNull(attrs.Checksum.ChecksumCRC32C) {
+        checksum.ChecksumCRC32C = &attrs.Checksum.ChecksumCRC32C
+    }
+
+    if !isStringNull(attrs.Checksum.ChecksumSHA1) {
+        checksum.ChecksumSHA1 = &attrs.Checksum.ChecksumSHA1
+    }
+
+    if !isStringNull(attrs.Checksum.ChecksumSHA256) {
+        checksum.ChecksumSHA256 = &attrs.Checksum.ChecksumSHA256
+    }
+
+    result := &common.StorageObjectAttributes{
+        LastModified: attrs.LastModified,
+        StorageObjectAttributesResponse: common.StorageObjectAttributesResponse{
+            ETag:         attrs.ETag,
+            StorageClass: attrs.StorageClass,
+            ObjectSize:   attrs.ObjectSize,
+            Checksum:     checksum,
+            ObjectParts: common.StorageObjectParts{
+                PartsCount:           attrs.ObjectParts.PartsCount,
+                PartNumberMarker:     attrs.ObjectParts.PartNumberMarker,
+                NextPartNumberMarker: attrs.ObjectParts.NextPartNumberMarker,
+                MaxParts:             attrs.ObjectParts.MaxParts,
+                IsTruncated:          attrs.ObjectParts.IsTruncated,
+                Parts:                make([]*common.StorageObjectAttributePart, len(attrs.ObjectParts.Parts)),
+            },
+        },
+    }
+
+    if !isStringNull(attrs.VersionID) {
+        result.VersionID = &attrs.VersionID
+    }
+
+    for i, p := range attrs.ObjectParts.Parts {
+        partChecksum := common.StorageObjectChecksum{}
+        if !isStringNull(p.ChecksumCRC32) {
+            partChecksum.ChecksumCRC32 = &p.ChecksumCRC32
+        }
+
+        if !isStringNull(p.ChecksumCRC32C) {
+            partChecksum.ChecksumCRC32C = &p.ChecksumCRC32C
+        }
+
+        if !isStringNull(p.ChecksumSHA1) {
+            partChecksum.ChecksumSHA1 = &p.ChecksumSHA1
+        }
+
+        if !isStringNull(p.ChecksumSHA256) {
+            partChecksum.ChecksumSHA256 = &p.ChecksumSHA256
+        }
+
+        result.ObjectParts.Parts[i] = &common.StorageObjectAttributePart{
+            StorageObjectChecksum: partChecksum,
+            PartNumber:            p.PartNumber,
+            Size:                  p.Size,
+        }
+    }
+
+    return result, nil
+}
+
+// PresignedGetObject generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private.
+// This presigned URL can have an associated expiration time in seconds after which it is no longer operational.
+// The maximum expiry is 604800 seconds (i.e. 7 days) and minimum is 1 second.
+func (mc *Client) PresignedGetObject(ctx context.Context, args *common.PresignedGetStorageObjectArguments) (*url.URL, error) {
+    return mc.presignObject(ctx, http.MethodGet, args)
+}
+
+// PresignedHeadObject generates a presigned URL for HTTP HEAD operations.
+// Browsers/Mobile clients may point to this URL to directly get metadata from objects even if the bucket is private.
+// This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
+func (mc *Client) PresignedHeadObject(ctx context.Context, args *common.PresignedGetStorageObjectArguments) (*url.URL, error) {
+    return mc.presignObject(ctx, http.MethodHead, args)
+}
+
+func (mc *Client) presignObject(ctx context.Context, method string, args *common.PresignedGetStorageObjectArguments) (*url.URL, error) {
+    ctx, span := mc.startOtelSpan(ctx, method+" PresignedObject", args.Bucket)
+    defer span.End()
+
+    reqParams := url.Values{}
+
+    for key, params := range args.RequestParams {
+        for _, param := range params {
+            reqParams.Add(key, param)
+        }
+    }
+
+    span.SetAttributes(
+        attribute.String("storage.key", args.Object),
+        attribute.String("url.query", reqParams.Encode()),
+    )
+
+    if args.Expiry != nil {
+        span.SetAttributes(attribute.String("storage.expiry", args.Expiry.String()))
+    }
+
+    fileName := filepath.Base(args.Object)
+    // Set request parameters for content-disposition.
+    reqParams.Set("response-content-disposition", fmt.Sprintf(`attachment; filename="%s"`, fileName))
+
+    header := http.Header{}
+
+    if mc.publicHost != nil {
+        header.Set("Host", mc.publicHost.Host)
+    }
+
+    result, err := mc.client.PresignHeader(ctx, method, args.Bucket, args.Object, args.Expiry.Duration, reqParams, header)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return nil, serializeErrorResponse(err)
+    }
+
+    if mc.publicHost != nil {
+        result.Host = mc.publicHost.Host
+        if mc.publicHost.Scheme != "" {
+            result.Scheme = mc.publicHost.Scheme
+        }
+    }
+
+    return result, nil
+}
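A sketch of minting a short-lived download link via PresignedGetObject above, assuming the ndc-sdk-go `scalar` package is imported; the 15-minute expiry is illustrative and must fall within S3's 1-second-to-7-day presign window:

func examplePresignedDownload(ctx context.Context, mc *Client) (string, error) {
    expiry := scalar.NewDuration(15 * time.Minute)

    u, err := mc.PresignedGetObject(ctx, &common.PresignedGetStorageObjectArguments{
        Bucket: "my-bucket",
        Object: "reports/report.csv",
        Expiry: &expiry,
    })
    if err != nil {
        return "", err
    }

    // the URL can be handed to a browser; it stops working after the expiry
    return u.String(), nil
}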
+
+// PresignedPutObject generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private.
+// This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
+func (mc *Client) PresignedPutObject(ctx context.Context, args *common.PresignedPutStorageObjectArguments) (*url.URL, error) {
+    ctx, span := mc.startOtelSpan(ctx, "PresignedPutObject", args.Bucket)
+    defer span.End()
+
+    span.SetAttributes(attribute.String("storage.key", args.Object))
+
+    if args.Expiry != nil {
+        span.SetAttributes(attribute.String("storage.expiry", args.Expiry.String()))
+    }
+
+    header := http.Header{}
+
+    if mc.publicHost != nil {
+        header.Set("Host", mc.publicHost.Host)
+    }
+
+    result, err := mc.client.PresignHeader(ctx, http.MethodPut, args.Bucket, args.Object, args.Expiry.Duration, url.Values{}, header)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return nil, serializeErrorResponse(err)
+    }
+
+    if mc.publicHost != nil {
+        result.Host = mc.publicHost.Host
+        if mc.publicHost.Scheme != "" {
+            result.Scheme = mc.publicHost.Scheme
+        }
+    }
+
+    return result, nil
+}
+
+// SetObjectLockConfig sets the object lock configuration on the given bucket. Mode, validity and unit must either all be set or all be nil.
+func (mc *Client) SetObjectLockConfig(ctx context.Context, bucketName string, opts common.SetStorageObjectLockConfig) error {
+    ctx, span := mc.startOtelSpan(ctx, "SetObjectLockConfig", bucketName)
+    defer span.End()
+
+    if opts.Mode != nil {
+        span.SetAttributes(attribute.String("storage.lock_mode", string(*opts.Mode)))
+    }
+
+    if opts.Unit != nil {
+        span.SetAttributes(attribute.String("storage.lock_unit", string(*opts.Unit)))
+    }
+
+    if opts.Validity != nil {
+        span.SetAttributes(attribute.Int("storage.lock_validity", int(*opts.Validity)))
+    }
+
+    err := mc.client.SetObjectLockConfig(ctx, bucketName, (*minio.RetentionMode)(opts.Mode), opts.Validity, (*minio.ValidityUnit)(opts.Unit))
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return serializeErrorResponse(err)
+    }
+
+    return nil
+}
+
+// GetObjectLockConfig gets the object lock configuration of the given bucket.
+func (mc *Client) GetObjectLockConfig(ctx context.Context, bucketName string) (*common.StorageObjectLockConfig, error) {
+    ctx, span := mc.startOtelSpan(ctx, "GetObjectLockConfig", bucketName)
+    defer span.End()
+
+    objectLock, mode, validity, unit, err := mc.client.GetObjectLockConfig(ctx, bucketName)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return nil, serializeErrorResponse(err)
+    }
+
+    result := &common.StorageObjectLockConfig{
+        ObjectLock: objectLock,
+        SetStorageObjectLockConfig: common.SetStorageObjectLockConfig{
+            Mode:     (*common.StorageRetentionMode)(mode),
+            Validity: validity,
+            Unit:     (*common.StorageRetentionValidityUnit)(unit),
+        },
+    }
+
+    return result, nil
+}
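A sketch of the lock-config call above; the GOVERNANCE mode, 30-day validity and unit string are illustrative values:

func exampleLockConfig(ctx context.Context, mc *Client) error {
    mode := common.StorageRetentionMode("GOVERNANCE")
    validity := uint(30)
    unit := common.StorageRetentionValidityUnit("DAYS")

    return mc.SetObjectLockConfig(ctx, "minio-bucket-lock", common.SetStorageObjectLockConfig{
        Mode:     &mode,
        Validity: &validity,
        Unit:     &unit,
    })
}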
diff --git a/connector/storage/minio/sse.go b/connector/storage/minio/sse.go
new file mode 100644
index 0000000..cb43ddb
--- /dev/null
+++ b/connector/storage/minio/sse.go
@@ -0,0 +1,101 @@
+package minio
+
+import (
+    "context"
+
+    "github.com/hasura/ndc-storage/connector/storage/common"
+    "github.com/minio/minio-go/v7/pkg/sse"
+    "go.opentelemetry.io/otel/codes"
+)
+
+// SetBucketEncryption sets default encryption configuration on a bucket.
+func (mc *Client) SetBucketEncryption(ctx context.Context, bucketName string, input common.ServerSideEncryptionConfiguration) error {
+    ctx, span := mc.startOtelSpan(ctx, "SetBucketEncryption", bucketName)
+    defer span.End()
+
+    err := mc.client.SetBucketEncryption(ctx, bucketName, validateBucketEncryptionConfiguration(input))
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return serializeErrorResponse(err)
+    }
+
+    return nil
+}
+
+// GetBucketEncryption gets the default encryption configuration set on a bucket.
+func (mc *Client) GetBucketEncryption(ctx context.Context, bucketName string) (*common.ServerSideEncryptionConfiguration, error) {
+    ctx, span := mc.startOtelSpan(ctx, "GetBucketEncryption", bucketName)
+    defer span.End()
+
+    rawResult, err := mc.client.GetBucketEncryption(ctx, bucketName)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return nil, serializeErrorResponse(err)
+    }
+
+    return serializeBucketEncryptionConfiguration(rawResult), nil
+}
+
+// RemoveBucketEncryption removes the default encryption configuration set on a bucket.
+func (mc *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error {
+    ctx, span := mc.startOtelSpan(ctx, "RemoveBucketEncryption", bucketName)
+    defer span.End()
+
+    err := mc.client.RemoveBucketEncryption(ctx, bucketName)
+    if err != nil {
+        span.SetStatus(codes.Error, err.Error())
+        span.RecordError(err)
+
+        return serializeErrorResponse(err)
+    }
+
+    return nil
+}
+
+func validateBucketEncryptionConfiguration(input common.ServerSideEncryptionConfiguration) *sse.Configuration {
+    result := &sse.Configuration{
+        Rules: make([]sse.Rule, len(input.Rules)),
+    }
+
+    for i, rule := range input.Rules {
+        r := sse.Rule{
+            Apply: sse.ApplySSEByDefault{
+                SSEAlgorithm: rule.Apply.SSEAlgorithm,
+            },
+        }
+
+        if rule.Apply.KmsMasterKeyID != nil {
+            r.Apply.KmsMasterKeyID = *rule.Apply.KmsMasterKeyID
+        }
+
+        result.Rules[i] = r
+    }
+
+    return result
+}
+
+func serializeBucketEncryptionConfiguration(input *sse.Configuration) *common.ServerSideEncryptionConfiguration {
+    result := &common.ServerSideEncryptionConfiguration{
+        Rules: make([]common.ServerSideEncryptionRule, len(input.Rules)),
+    }
+
+    for i, rule := range input.Rules {
+        r := common.ServerSideEncryptionRule{
+            Apply: common.StorageApplySSEByDefault{
+                SSEAlgorithm: rule.Apply.SSEAlgorithm,
+            },
+        }
+
+        if !isStringNull(rule.Apply.KmsMasterKeyID) {
+            r.Apply.KmsMasterKeyID = &rule.Apply.KmsMasterKeyID
+        }
+
+        result.Rules[i] = r
+    }
+
+    return result
+}
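A sketch of enabling default SSE-KMS on a bucket with the helpers above; the key ID is a placeholder:

func exampleBucketEncryption(ctx context.Context, mc *Client) error {
    keyID := "alias/my-master-key" // placeholder KMS key

    return mc.SetBucketEncryption(ctx, "my-bucket", common.ServerSideEncryptionConfiguration{
        Rules: []common.ServerSideEncryptionRule{
            {
                Apply: common.StorageApplySSEByDefault{
                    SSEAlgorithm:   "aws:kms",
                    KmsMasterKeyID: &keyID,
                },
            },
        },
    })
}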
diff --git a/connector/storage/minio/utils.go b/connector/storage/minio/utils.go
new file mode 100644
index 0000000..fbaf49e
--- /dev/null
+++ b/connector/storage/minio/utils.go
@@ -0,0 +1,536 @@
+package minio
+
+import (
+    "errors"
+    "fmt"
+    "net/url"
+
+    "github.com/hasura/ndc-sdk-go/schema"
+    "github.com/hasura/ndc-storage/connector/storage/common"
+    "github.com/minio/minio-go/v7"
+    "github.com/minio/minio-go/v7/pkg/notification"
+    "go.opentelemetry.io/otel/attribute"
+    "go.opentelemetry.io/otel/trace"
+)
+
+func serializeGrant(grant minio.Grant) common.StorageGrant {
+    g := common.StorageGrant{}
+
+    if !isStringNull(grant.Permission) {
+        g.Permission = &grant.Permission
+    }
+
+    if !isStringNull(grant.Grantee.ID) || !isStringNull(grant.Grantee.DisplayName) || !isStringNull(grant.Grantee.URI) {
+        g.Grantee = &common.StorageGrantee{}
+
+        if !isStringNull(grant.Grantee.ID) {
+            g.Grantee.ID = &grant.Grantee.ID
+        }
+
+        if !isStringNull(grant.Grantee.DisplayName) {
+            g.Grantee.DisplayName = &grant.Grantee.DisplayName
+        }
+
+        if !isStringNull(grant.Grantee.URI) {
+            g.Grantee.URI = &grant.Grantee.URI
+        }
+    }
+
+    return g
+}
+
+func serializeObjectInfo(obj minio.ObjectInfo) common.StorageObject {
+    grants := make([]common.StorageGrant, len(obj.Grant))
+
+    for i, grant := range obj.Grant {
+        grants[i] = serializeGrant(grant)
+    }
+
+    checksum := common.StorageObjectChecksum{}
+    if !isStringNull(obj.ChecksumCRC32) {
+        checksum.ChecksumCRC32 = &obj.ChecksumCRC32
+    }
+
+    if !isStringNull(obj.ChecksumCRC32C) {
+        checksum.ChecksumCRC32C = &obj.ChecksumCRC32C
+    }
+
+    if !isStringNull(obj.ChecksumSHA1) {
+        checksum.ChecksumSHA1 = &obj.ChecksumSHA1
+    }
+
+    if !isStringNull(obj.ChecksumSHA256) {
+        checksum.ChecksumSHA256 = &obj.ChecksumSHA256
+    }
+
+    object := common.StorageObject{
+        ETag:                  obj.ETag,
+        Name:                  obj.Key,
+        LastModified:          obj.LastModified,
+        Size:                  obj.Size,
+        ContentType:           obj.ContentType,
+        Expires:               obj.Expires,
+        Metadata:              obj.Metadata,
+        UserMetadata:          obj.UserMetadata,
+        UserTags:              obj.UserTags,
+        UserTagCount:          obj.UserTagCount,
+        Grant:                 grants,
+        IsLatest:              &obj.IsLatest,
+        IsDeleteMarker:        &obj.IsDeleteMarker,
+        ReplicationReady:      &obj.ReplicationReady,
+        StorageObjectChecksum: checksum,
+    }
+
+    if !isStringNull(obj.Owner.DisplayName) || !isStringNull(obj.Owner.ID) {
+        object.Owner = &common.StorageOwner{}
+        if !isStringNull(obj.Owner.DisplayName) {
+            object.Owner.DisplayName = &obj.Owner.DisplayName
+        }
+
+        if !isStringNull(obj.Owner.ID) {
+            object.Owner.ID = &obj.Owner.ID
+        }
+    }
+
+    if !isStringNull(obj.StorageClass) {
+        object.StorageClass = &obj.StorageClass
+    }
+
+    if !isStringNull(obj.VersionID) {
+        object.VersionID = &obj.VersionID
+    }
+
+    if !isStringNull(obj.ExpirationRuleID) {
+        object.ExpirationRuleID = &obj.ExpirationRuleID
+    }
+
+    if !obj.Expiration.IsZero() {
+        object.Expiration = &obj.Expiration
+    }
+
+    if !isStringNull(obj.ReplicationStatus) {
+        replicationStatus := common.StorageObjectReplicationStatus(obj.ReplicationStatus)
+        object.ReplicationStatus = &replicationStatus
+    }
+
+    if obj.Restore != nil {
+        object.Restore = &common.StorageRestoreInfo{
+            OngoingRestore: obj.Restore.OngoingRestore,
+        }
+
+        if !obj.Restore.ExpiryTime.IsZero() {
+            object.Restore.ExpiryTime = &obj.Restore.ExpiryTime
+        }
+    }
+
+    return object
+}
+
+func serializeListObjectsOptions(span trace.Span, opts *common.ListStorageObjectsOptions) minio.ListObjectsOptions {
+    span.SetAttributes(
+        attribute.Bool("storage.options.recursive", opts.Recursive),
+        attribute.Bool("storage.options.with_versions", opts.WithVersions),
+        attribute.Bool("storage.options.with_metadata", opts.WithMetadata),
+    )
+
+    if opts.Prefix != "" {
+        span.SetAttributes(attribute.String("storage.options.prefix", opts.Prefix))
+    }
+
+    if !isStringNull(opts.StartAfter) {
+        span.SetAttributes(attribute.String("storage.options.start_after", opts.StartAfter))
+    }
+
+    if opts.MaxKeys > 0 {
+        span.SetAttributes(attribute.Int("storage.options.max_keys", opts.MaxKeys))
+    }
+
+    return minio.ListObjectsOptions{
+        WithVersions: opts.WithVersions,
+        WithMetadata: opts.WithMetadata,
+        Prefix:       opts.Prefix,
+        Recursive:    opts.Recursive,
+        MaxKeys:      opts.MaxKeys,
+        StartAfter:   opts.StartAfter,
+    }
+}
+
+func serializeUploadObjectInfo(obj minio.UploadInfo) common.StorageUploadInfo {
+    checksum := common.StorageObjectChecksum{}
+    if !isStringNull(obj.ChecksumCRC32) {
+        checksum.ChecksumCRC32 = &obj.ChecksumCRC32
+    }
+
+    if !isStringNull(obj.ChecksumCRC32C) {
+        checksum.ChecksumCRC32C = &obj.ChecksumCRC32C
+    }
+
+    if !isStringNull(obj.ChecksumSHA1) {
+        checksum.ChecksumSHA1 = &obj.ChecksumSHA1
+    }
+
+    if !isStringNull(obj.ChecksumSHA256) {
+        checksum.ChecksumSHA256 = &obj.ChecksumSHA256
+    }
+
+    object := common.StorageUploadInfo{
+        Bucket:                obj.Bucket,
+        ETag:                  obj.ETag,
+        Name:                  obj.Key,
+        Size:                  obj.Size,
+        StorageObjectChecksum: checksum,
+    }
+
+    if !obj.LastModified.IsZero() {
+        object.LastModified = &obj.LastModified
+    }
+
+    if !obj.Expiration.IsZero() {
+        object.Expiration = &obj.Expiration
+    }
+
+    if !isStringNull(obj.Location) {
+        object.Location = &obj.Location
+    }
+
+    if !isStringNull(obj.VersionID) {
+        object.VersionID = &obj.VersionID
+    }
+
+    if !isStringNull(obj.ExpirationRuleID) {
+        object.ExpirationRuleID = &obj.ExpirationRuleID
+    }
+
+    return object
+}
+
+func serializeGetObjectOptions(span trace.Span, opts *common.GetStorageObjectOptions) minio.GetObjectOptions {
+    options := minio.GetObjectOptions{}
+    if opts.VersionID != nil && !isStringNull(*opts.VersionID) {
+        options.VersionID = *opts.VersionID
+        span.SetAttributes(attribute.String("storage.request_object_version", options.VersionID))
+    }
+
+    if opts.PartNumber != nil {
+        options.PartNumber = *opts.PartNumber
+        span.SetAttributes(attribute.Int("storage.part_number", options.PartNumber))
+    }
+
+    if opts.Checksum != nil {
+        options.Checksum = *opts.Checksum
+        span.SetAttributes(attribute.Bool("storage.request_object_checksum", options.Checksum))
+    }
+
+    for key, value := range opts.Headers {
+        span.SetAttributes(attribute.StringSlice("http.request.header."+key, []string{value}))
+        options.Set(key, value)
+    }
+
+    if len(opts.RequestParams) > 0 {
+        q := url.Values{}
+
+        for key, values := range opts.RequestParams {
+            for _, value := range values {
+                options.AddReqParam(key, value)
+                q.Add(key, value)
+            }
+        }
+
+        span.SetAttributes(attribute.String("url.query", q.Encode()))
+    }
+
+    return options
+}
+
+func serializeCopySourceOptions(src common.StorageCopySrcOptions) minio.CopySrcOptions {
+    srcOptions := minio.CopySrcOptions{
+        Bucket:      src.Bucket,
+        Object:      src.Object,
+        VersionID:   src.VersionID,
+        MatchETag:   src.MatchETag,
+        NoMatchETag: src.NoMatchETag,
+        MatchRange:  src.MatchRange,
+        Start:       src.Start,
+        End:         src.End,
+    }
+
+    if src.MatchModifiedSince != nil {
+        srcOptions.MatchModifiedSince = *src.MatchModifiedSince
+    }
+
+    if src.MatchUnmodifiedSince != nil {
+        srcOptions.MatchUnmodifiedSince = *src.MatchUnmodifiedSince
+    }
+
+    return srcOptions
+}
+
+func convertCopyDestOptions(dst common.StorageCopyDestOptions) (*minio.CopyDestOptions, error) {
+    destOptions := minio.CopyDestOptions{
+        Bucket:          dst.Bucket,
+        Object:          dst.Object,
+        UserMetadata:    dst.UserMetadata,
+        ReplaceMetadata: dst.ReplaceMetadata,
+        UserTags:        dst.UserTags,
+        ReplaceTags:     dst.ReplaceTags,
+        Size:            dst.Size,
+    }
+
+    if dst.RetainUntilDate != nil {
+        destOptions.RetainUntilDate = *dst.RetainUntilDate
+    }
+
+    if dst.Mode != nil {
+        mode := minio.RetentionMode(string(*dst.Mode))
+        if !mode.IsValid() {
+            return nil, fmt.Errorf("invalid RetentionMode: %s", *dst.Mode)
+        }
+
+        destOptions.Mode = mode
+    }
+
+    if dst.LegalHold != nil {
+        legalHold := minio.LegalHoldStatus(*dst.LegalHold)
+        if !legalHold.IsValid() {
+            return nil, fmt.Errorf("invalid LegalHoldStatus: %s", *dst.LegalHold)
+        }
+
+        destOptions.LegalHold = legalHold
+    }
+
+    return &destOptions, nil
+}
+
+func serializeBucketNotificationCommonConfig(item notification.Config) common.NotificationCommonConfig {
+    cfg := common.NotificationCommonConfig{
+        Events: make([]string, len(item.Events)),
+    }
+
+    if !isStringNull(item.ID) {
+        cfg.ID = &item.ID
+    }
+
+    if item.Filter != nil {
+        // allocate the filter wrapper before writing to it; assumes the common
+        // mirror types are named NotificationFilter and NotificationS3Key
+        cfg.Filter = &common.NotificationFilter{
+            S3Key: &common.NotificationS3Key{
+                FilterRules: make([]common.NotificationFilterRule, len(item.Filter.S3Key.FilterRules)),
+            },
+        }
+
+        for i, rule := range item.Filter.S3Key.FilterRules {
+            cfg.Filter.S3Key.FilterRules[i] = common.NotificationFilterRule(rule)
+        }
+    }
+
+    if item.Arn.AccountID != "" || item.Arn.Partition != "" || item.Arn.Resource != "" || item.Arn.Service != "" {
+        arn := item.Arn.String()
+        cfg.Arn = &arn
+    }
+
+    for i, eventType := range item.Events {
+        cfg.Events[i] = string(eventType)
+    }
+
+    return cfg
+}
+
+func validateBucketNotificationCommonConfig(item common.NotificationCommonConfig) (*notification.Config, error) {
+    cfg := notification.Config{
+        Events: make([]notification.EventType, len(item.Events)),
+    }
+
+    if item.ID != nil {
+        cfg.ID = *item.ID
+    }
+
+    if item.Filter != nil && item.Filter.S3Key != nil {
+        cfg.Filter = &notification.Filter{
+            S3Key: notification.S3Key{
+                FilterRules: make([]notification.FilterRule, len(item.Filter.S3Key.FilterRules)),
+            },
+        }
+
+        for i, rule := range item.Filter.S3Key.FilterRules {
+            cfg.Filter.S3Key.FilterRules[i] = notification.FilterRule(rule)
+        }
+    }
+
+    if item.Arn != nil {
+        arn, err := notification.NewArnFromString(*item.Arn)
+        if err != nil {
+            return nil, err
+        }
+
+        cfg.Arn = arn
+    }
+
+    for i, eventType := range item.Events {
+        cfg.Events[i] = notification.EventType(eventType)
+    }
+
+    return &cfg, nil
+}
+
+func validateBucketNotificationConfig(input common.NotificationConfig) (*notification.Configuration, error) {
+    result := notification.Configuration{
+        LambdaConfigs: make([]notification.LambdaConfig, len(input.LambdaConfigs)),
+        TopicConfigs:  make([]notification.TopicConfig, len(input.TopicConfigs)),
+        QueueConfigs:  make([]notification.QueueConfig, len(input.QueueConfigs)),
+    }
+
+    for i, item := range input.LambdaConfigs {
+        commonCfg, err := validateBucketNotificationCommonConfig(item.NotificationCommonConfig)
+        if err != nil {
+            return nil, fmt.Errorf("cloudFunctionConfigurations[%d]: %w", i, err)
+        }
+
+        cfg := notification.LambdaConfig{
+            Lambda: item.Lambda,
+            Config: *commonCfg,
+        }
+
+        result.LambdaConfigs[i] = cfg
+    }
+
+    for i, item := range input.QueueConfigs {
+        commonCfg, err := validateBucketNotificationCommonConfig(item.NotificationCommonConfig)
+        if err != nil {
+            return nil, fmt.Errorf("queueConfigurations[%d]: %w", i, err)
+        }
+
+        cfg := notification.QueueConfig{
+            Queue:  item.Queue,
+            Config: *commonCfg,
+        }
+
+        result.QueueConfigs[i] = cfg
+    }
+
+    for i, item := range input.TopicConfigs {
+        commonCfg, err := validateBucketNotificationCommonConfig(item.NotificationCommonConfig)
+        if err != nil {
+            return nil, fmt.Errorf("topicConfigurations[%d]: %w", i, err)
+        }
+
+        cfg := notification.TopicConfig{
+            Topic:  item.Topic,
+            Config: *commonCfg,
+        }
+
+        result.TopicConfigs[i] = cfg
+    }
+
+    return &result, nil
+}
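For orientation, a sketch of the input shape validateBucketNotificationConfig expects; the queue ARN and event name are placeholders, and the embedded field name follows this patch:

func exampleNotificationConfig() (*notification.Configuration, error) {
    cfg := common.NotificationConfig{
        QueueConfigs: []common.NotificationQueueConfig{
            {
                Queue: "arn:minio:sqs::primary:webhook", // placeholder ARN
                NotificationCommonConfig: common.NotificationCommonConfig{
                    Events: []string{"s3:ObjectCreated:*"},
                },
            },
        },
    }

    return validateBucketNotificationConfig(cfg)
}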
+
+func serializeBucketNotificationConfig(input notification.Configuration) *common.NotificationConfig {
+    result := common.NotificationConfig{
+        LambdaConfigs: make([]common.NotificationLambdaConfig, len(input.LambdaConfigs)),
+        TopicConfigs:  make([]common.NotificationTopicConfig, len(input.TopicConfigs)),
+        QueueConfigs:  make([]common.NotificationQueueConfig, len(input.QueueConfigs)),
+    }
+
+    for i, item := range input.LambdaConfigs {
+        cfg := common.NotificationLambdaConfig{
+            Lambda:                   item.Lambda,
+            NotificationCommonConfig: serializeBucketNotificationCommonConfig(item.Config),
+        }
+
+        result.LambdaConfigs[i] = cfg
+    }
+
+    for i, item := range input.QueueConfigs {
+        cfg := common.NotificationQueueConfig{
+            Queue:                    item.Queue,
+            NotificationCommonConfig: serializeBucketNotificationCommonConfig(item.Config),
+        }
+
+        result.QueueConfigs[i] = cfg
+    }
+
+    for i, item := range input.TopicConfigs {
+        cfg := common.NotificationTopicConfig{
+            Topic:                    item.Topic,
+            NotificationCommonConfig: serializeBucketNotificationCommonConfig(item.Config),
+        }
+
+        result.TopicConfigs[i] = cfg
+    }
+
+    return &result
+}
+
+func parseChecksumType(input common.ChecksumType) minio.ChecksumType {
+    switch string(input) {
+    case "SHA256":
+        return minio.ChecksumSHA256
+    case "SHA1":
+        return minio.ChecksumSHA1
+    case "CRC32":
+        return minio.ChecksumCRC32
+    case "CRC32C":
+        return minio.ChecksumCRC32C
+    case "CRC64NVME":
+        return minio.ChecksumCRC64NVME
+    case "FullObjectCRC32":
+        return minio.ChecksumFullObjectCRC32
+    case "FullObjectCRC32C":
+        return minio.ChecksumFullObjectCRC32C
+    default:
+        return minio.ChecksumNone
+    }
+}
+
+func evalMinioErrorResponse(err minio.ErrorResponse) *schema.ConnectorError {
+    details := map[string]any{
+        "statusCode": err.StatusCode,
+        "server":     err.Server,
+    }
+
+    if err.Code != "" {
+        details["code"] = err.Code
+    }
+
+    if err.BucketName != "" {
+        details["bucketName"] = err.BucketName
+    }
+
+    if err.Key != "" {
+        details["key"] = err.Key
+    }
+
+    if err.HostID != "" {
+        details["hostId"] = err.HostID
+    }
+
+    if err.RequestID != "" {
+        details["requestId"] = err.RequestID
+    }
+
+    if err.Resource != "" {
+        details["resource"] = err.Resource
+    }
+
+    if err.Region != "" {
+        details["region"] = err.Region
+    }
+
+    if err.StatusCode >= 500 {
+        return schema.NewConnectorError(err.StatusCode, err.Message, details)
+    }
+
+    return schema.UnprocessableContentError(err.Message, details)
+}
+
+func serializeErrorResponse(err error) *schema.ConnectorError {
+    var errResponse minio.ErrorResponse
+    if errors.As(err, &errResponse) {
+        return evalMinioErrorResponse(errResponse)
+    }
+
+    errRespPtr := &errResponse
+    if errors.As(err, &errRespPtr) {
+        return evalMinioErrorResponse(*errRespPtr)
+    }
+
+    return schema.UnprocessableContentError(err.Error(), nil)
+}
+
+func isStringNull(input string) bool {
+    return input == "" || input == "null"
+}
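To illustrate the error mapping above: a 4xx minio error becomes an UnprocessableContent error carrying structured details, while 5xx responses keep their original status code. The values below are illustrative:

func exampleErrorMapping() *schema.ConnectorError {
    err := minio.ErrorResponse{
        Code:       "NoSuchKey",
        Message:    "The specified key does not exist.",
        StatusCode: 404,
        BucketName: "my-bucket",
        Key:        "missing.txt",
    }

    // yields a 422 connector error with code/bucketName/key in its details
    return serializeErrorResponse(err)
}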
diff --git a/connector/storage/object.go b/connector/storage/object.go
new file mode 100644
index 0000000..5f4f419
--- /dev/null
+++ b/connector/storage/object.go
@@ -0,0 +1,368 @@
+package storage
+
+import (
+    "bytes"
+    "context"
+    "io"
+    "strings"
+    "time"
+
+    "github.com/hasura/ndc-sdk-go/scalar"
+    "github.com/hasura/ndc-sdk-go/schema"
+    "github.com/hasura/ndc-storage/connector/storage/common"
+    "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// ListObjects lists objects in a bucket.
+func (m *Manager) ListObjects(ctx context.Context, args *common.ListStorageObjectsOptions) ([]common.StorageObject, error) {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return nil, err
+    }
+
+    args.Bucket = bucketName
+
+    return client.ListObjects(ctx, args)
+}
+
+// ListIncompleteUploads lists partially uploaded objects in a bucket.
+func (m *Manager) ListIncompleteUploads(ctx context.Context, args *common.ListIncompleteUploadsArguments) ([]common.StorageObjectMultipartInfo, error) {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return nil, err
+    }
+
+    args.Bucket = bucketName
+    args.Prefix = normalizeObjectName(args.Prefix)
+
+    return client.ListIncompleteUploads(ctx, args)
+}
+
+// GetObject returns a stream of the object data. Most of the common errors occur when reading the stream.
+func (m *Manager) GetObject(ctx context.Context, args *common.GetStorageObjectOptions) (io.ReadCloser, error) {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return nil, err
+    }
+
+    args.Bucket = bucketName
+    args.Object = normalizeObjectName(args.Object)
+
+    return client.GetObject(ctx, args)
+}
+
+// PutObject uploads objects that are less than 128MiB in a single PUT operation. For objects that are greater than 128MiB in size,
+// PutObject seamlessly uploads the object as parts of 128MiB or more depending on the actual file size. The max upload size for an object is 5TB.
+func (m *Manager) PutObject(ctx context.Context, args *common.PutStorageObjectArguments, data []byte) (*common.StorageUploadInfo, error) {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return nil, err
+    }
+
+    args.Bucket = bucketName
+    args.Object = normalizeObjectName(args.Object)
+
+    return client.PutObject(ctx, args, bytes.NewReader(data), int64(len(data)))
+}
+
+// CopyObject creates or replaces an object through server-side copying of an existing object.
+// It supports conditional copying, copying a part of an object and server-side encryption of destination and decryption of source.
+// To copy multiple source objects into a single destination object see the ComposeObject API.
+func (m *Manager) CopyObject(ctx context.Context, args *common.CopyStorageObjectArguments) (*common.StorageUploadInfo, error) {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Dest.Bucket)
+    if err != nil {
+        return nil, err
+    }
+
+    args.Dest.Bucket = bucketName
+    args.Dest.Object = normalizeObjectName(args.Dest.Object)
+    args.Source.Object = normalizeObjectName(args.Source.Object)
+
+    if args.Source.Bucket == "" {
+        args.Source.Bucket = client.defaultBucket
+    }
+
+    return client.CopyObject(ctx, args.Dest, args.Source)
+}
+
+// ComposeObject creates an object by concatenating a list of source objects using server-side copying.
+func (m *Manager) ComposeObject(ctx context.Context, args *common.ComposeStorageObjectArguments) (*common.StorageUploadInfo, error) {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Dest.Bucket)
+    if err != nil {
+        return nil, err
+    }
+
+    args.Dest.Bucket = bucketName
+    args.Dest.Object = normalizeObjectName(args.Dest.Object)
+    srcs := make([]common.StorageCopySrcOptions, len(args.Sources))
+
+    for i, src := range args.Sources {
+        if src.Bucket == "" {
+            src.Bucket = client.defaultBucket
+        }
+
+        src.Object = normalizeObjectName(src.Object)
+
+        srcs[i] = src
+    }
+
+    return client.ComposeObject(ctx, args.Dest, srcs)
+}
+
+// StatObject fetches metadata of an object.
+func (m *Manager) StatObject(ctx context.Context, args *common.GetStorageObjectOptions) (*common.StorageObject, error) {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return nil, err
+    }
+
+    args.Bucket = bucketName
+    args.Object = normalizeObjectName(args.Object)
+
+    return client.StatObject(ctx, args)
+}
+
+// RemoveObject removes an object with some specified options.
+func (m *Manager) RemoveObject(ctx context.Context, args *common.RemoveStorageObjectOptions) error {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return err
+    }
+
+    args.Bucket = bucketName
+    args.Object = normalizeObjectName(args.Object)
+
+    return client.RemoveObject(ctx, args)
+}
+
+// PutObjectRetention applies object retention lock onto an object.
+func (m *Manager) PutObjectRetention(ctx context.Context, args *common.PutStorageObjectRetentionOptions) error {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return err
+    }
+
+    args.Bucket = bucketName
+    args.Object = normalizeObjectName(args.Object)
+
+    return client.PutObjectRetention(ctx, args)
+}
+
+// RemoveObjects removes a list of objects obtained from an input channel. The call sends a delete request to the server up to 1000 objects at a time.
+// The errors observed are sent over the error channel.
+func (m *Manager) RemoveObjects(ctx context.Context, args *common.RemoveStorageObjectsOptions) ([]common.RemoveStorageObjectError, error) {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return nil, err
+    }
+
+    args.Bucket = bucketName
+    args.Prefix = normalizeObjectName(args.Prefix)
+
+    return client.RemoveObjects(ctx, args), nil
+}
+
+// PutObjectLegalHold applies legal-hold onto an object.
+func (m *Manager) PutObjectLegalHold(ctx context.Context, args *common.PutStorageObjectLegalHoldOptions) error {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return err
+    }
+
+    args.Bucket = bucketName
+    args.Object = normalizeObjectName(args.Object)
+
+    return client.PutObjectLegalHold(ctx, args)
+}
+
+// GetObjectLegalHold returns legal-hold status on a given object.
+func (m *Manager) GetObjectLegalHold(ctx context.Context, args *common.GetStorageObjectLegalHoldOptions) (common.StorageLegalHoldStatus, error) {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return "", err
+    }
+
+    args.Bucket = bucketName
+    args.Object = normalizeObjectName(args.Object)
+
+    return client.GetObjectLegalHold(ctx, args)
+}
+
+// PutObjectTagging sets new object tags on the given object, replacing or overwriting any existing tags.
+func (m *Manager) PutObjectTagging(ctx context.Context, args *common.PutStorageObjectTaggingOptions) error {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return err
+    }
+
+    args.Bucket = bucketName
+    args.Object = normalizeObjectName(args.Object)
+
+    return client.PutObjectTagging(ctx, args)
+}
+
+// GetObjectTagging fetches object tags from the given object.
+func (m *Manager) GetObjectTagging(ctx context.Context, args *common.StorageObjectTaggingOptions) (map[string]string, error) {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return nil, err
+    }
+
+    args.Bucket = bucketName
+    args.Object = normalizeObjectName(args.Object)
+
+    return client.GetObjectTagging(ctx, args)
+}
+
+// RemoveObjectTagging removes object tags from the given object.
+func (m *Manager) RemoveObjectTagging(ctx context.Context, args *common.StorageObjectTaggingOptions) error {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return err
+    }
+
+    args.Bucket = bucketName
+    args.Object = normalizeObjectName(args.Object)
+
+    return client.RemoveObjectTagging(ctx, args)
+}
+
+// GetObjectAttributes fetches metadata and additional attributes (checksums, object parts) of an object.
+func (m *Manager) GetObjectAttributes(ctx context.Context, args *common.StorageObjectAttributesOptions) (*common.StorageObjectAttributes, error) {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return nil, err
+    }
+
+    args.Bucket = bucketName
+    args.Object = normalizeObjectName(args.Object)
+
+    return client.GetObjectAttributes(ctx, args)
+}
+
+// RemoveIncompleteUpload removes a partially uploaded object.
+func (m *Manager) RemoveIncompleteUpload(ctx context.Context, args *common.RemoveIncompleteUploadArguments) error {
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return err
+    }
+
+    args.Bucket = bucketName
+    args.Object = normalizeObjectName(args.Object)
+
+    return client.RemoveIncompleteUpload(ctx, args)
+}
+
+// PresignedGetObject generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private.
+// This presigned URL can have an associated expiration time in seconds after which it is no longer operational.
+// The maximum expiry is 604800 seconds (i.e. 7 days) and minimum is 1 second.
+func (m *Manager) PresignedGetObject(ctx context.Context, args *common.PresignedGetStorageObjectArguments) (common.PresignedURLResponse, error) {
+    if err := s3utils.CheckValidObjectName(args.Object); err != nil {
+        return common.PresignedURLResponse{}, schema.UnprocessableContentError(err.Error(), nil)
+    }
+
+    client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+    if err != nil {
+        return common.PresignedURLResponse{}, err
+    }
+
+    args.Bucket = bucketName
+    args.Object = normalizeObjectName(args.Object)
+
+    if args.Expiry == nil {
+        if client.defaultPresignedExpiry == nil {
+            return common.PresignedURLResponse{}, schema.UnprocessableContentError("expiry is required", nil)
+        }
+
+        // fall back to the client's configured default expiry
+        expiry := scalar.NewDuration(*client.defaultPresignedExpiry)
+        args.Expiry = &expiry
+    }
+
+    rawURL, err := client.PresignedGetObject(ctx, args)
+    if err != nil {
+        return common.PresignedURLResponse{}, err
+    }
+
+    return common.PresignedURLResponse{
+        URL:       rawURL.String(),
+        ExpiredAt: FormatTimestamp(time.Now().Add(args.Expiry.Duration)),
+    }, nil
+}
+func (m *Manager) PresignedPutObject(ctx context.Context, args *common.PresignedPutStorageObjectArguments) (common.PresignedURLResponse, error) {
+	if err := s3utils.CheckValidObjectName(args.Object); err != nil {
+		return common.PresignedURLResponse{}, schema.UnprocessableContentError(err.Error(), nil)
+	}
+
+	client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+	if err != nil {
+		return common.PresignedURLResponse{}, err
+	}
+
+	args.Bucket = bucketName
+	args.Object = normalizeObjectName(args.Object)
+
+	if args.Expiry == nil {
+		if client.defaultPresignedExpiry == nil {
+			return common.PresignedURLResponse{}, schema.UnprocessableContentError("expiry is required", nil)
+		}
+
+		expiry := scalar.NewDuration(*client.defaultPresignedExpiry)
+		args.Expiry = &expiry
+	}
+
+	rawURL, err := client.PresignedPutObject(ctx, args)
+	if err != nil {
+		return common.PresignedURLResponse{}, err
+	}
+
+	return common.PresignedURLResponse{
+		URL:       rawURL.String(),
+		ExpiredAt: FormatTimestamp(time.Now().Add(args.Expiry.Duration)),
+	}, nil
+}
+
+// PresignedHeadObject generates a presigned URL for HTTP HEAD operations.
+// Browsers/Mobile clients may point to this URL to directly get metadata from objects even if the bucket is private.
+// This presigned URL can have an associated expiration time in seconds after which it is no longer operational. If no expiry is given, the client's configured default presigned expiry is used.
+func (m *Manager) PresignedHeadObject(ctx context.Context, args *common.PresignedGetStorageObjectArguments) (common.PresignedURLResponse, error) {
+	if err := s3utils.CheckValidObjectName(args.Object); err != nil {
+		return common.PresignedURLResponse{}, schema.UnprocessableContentError(err.Error(), nil)
+	}
+
+	client, bucketName, err := m.GetClientAndBucket(args.ClientID, args.Bucket)
+	if err != nil {
+		return common.PresignedURLResponse{}, err
+	}
+
+	args.Bucket = bucketName
+	args.Object = normalizeObjectName(args.Object)
+
+	if args.Expiry == nil {
+		if client.defaultPresignedExpiry == nil {
+			return common.PresignedURLResponse{}, schema.UnprocessableContentError("expiry is required", nil)
+		}
+
+		expiry := scalar.NewDuration(*client.defaultPresignedExpiry)
+		args.Expiry = &expiry
+	}
+
+	rawURL, err := client.PresignedHeadObject(ctx, args)
+	if err != nil {
+		return common.PresignedURLResponse{}, err
+	}
+
+	return common.PresignedURLResponse{
+		URL:       rawURL.String(),
+		ExpiredAt: FormatTimestamp(time.Now().Add(args.Expiry.Duration)),
+	}, nil
+}
+
+func normalizeObjectName(objectName string) string {
+	// normalize Windows-style backslashes in the object path to Unix-compatible forward slashes
+	return strings.ReplaceAll(objectName, "\\", "/")
+}
diff --git a/connector/testdata/01-setup/mutation/01-createStorageBucket-lock/expected.json b/connector/testdata/01-setup/mutation/01-createStorageBucket-lock/expected.json
new file mode 100644
index 0000000..e28c88a
--- /dev/null
+++ b/connector/testdata/01-setup/mutation/01-createStorageBucket-lock/expected.json
@@ -0,0 +1,8 @@
+{
+  "operation_results": [
+    {
+      "result": true,
+      "type": "procedure"
+    }
+  ]
+}
\ No newline at end of file
diff --git a/connector/testdata/01-setup/mutation/01-createStorageBucket-lock/request.json b/connector/testdata/01-setup/mutation/01-createStorageBucket-lock/request.json
new file mode 100644
index 0000000..9a6c9aa
--- /dev/null
+++ b/connector/testdata/01-setup/mutation/01-createStorageBucket-lock/request.json
@@ -0,0 +1,15 @@
+{
+  "collection_relationships": {},
+  "operations": [
+    {
+      "type": "procedure",
+      "name": 
"createStorageBucket", + "arguments": { + "clientId": "minio", + "name": "minio-bucket-lock", + "objectLocking": true, + "region": "us-east-1" + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/01-createStorageBucket-s3/expected.json b/connector/testdata/01-setup/mutation/01-createStorageBucket-s3/expected.json new file mode 100644 index 0000000..e28c88a --- /dev/null +++ b/connector/testdata/01-setup/mutation/01-createStorageBucket-s3/expected.json @@ -0,0 +1,8 @@ +{ + "operation_results": [ + { + "result": true, + "type": "procedure" + } + ] +} \ No newline at end of file diff --git a/connector/testdata/01-setup/mutation/01-createStorageBucket-s3/request.json b/connector/testdata/01-setup/mutation/01-createStorageBucket-s3/request.json new file mode 100644 index 0000000..b7bbcd2 --- /dev/null +++ b/connector/testdata/01-setup/mutation/01-createStorageBucket-s3/request.json @@ -0,0 +1,15 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "createStorageBucket", + "arguments": { + "clientId": "s3", + "name": "s3-bucket-test", + "objectLocking": true, + "region": "us-east-1" + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/01-createStorageBucket/expected.json b/connector/testdata/01-setup/mutation/01-createStorageBucket/expected.json new file mode 100644 index 0000000..e28c88a --- /dev/null +++ b/connector/testdata/01-setup/mutation/01-createStorageBucket/expected.json @@ -0,0 +1,8 @@ +{ + "operation_results": [ + { + "result": true, + "type": "procedure" + } + ] +} \ No newline at end of file diff --git a/connector/testdata/01-setup/mutation/01-createStorageBucket/request.json b/connector/testdata/01-setup/mutation/01-createStorageBucket/request.json new file mode 100644 index 0000000..feaba5d --- /dev/null +++ b/connector/testdata/01-setup/mutation/01-createStorageBucket/request.json @@ -0,0 +1,15 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "createStorageBucket", + "arguments": { + "clientId": "minio", + "name": "minio-bucket-test", + "objectLocking": false, + "region": "us-east-1" + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/02-enableStorageBucketVersioning/expected.json b/connector/testdata/01-setup/mutation/02-enableStorageBucketVersioning/expected.json new file mode 100644 index 0000000..777dfc1 --- /dev/null +++ b/connector/testdata/01-setup/mutation/02-enableStorageBucketVersioning/expected.json @@ -0,0 +1 @@ +{ "operation_results": [{ "result": true, "type": "procedure" }] } diff --git a/connector/testdata/01-setup/mutation/02-enableStorageBucketVersioning/request.json b/connector/testdata/01-setup/mutation/02-enableStorageBucketVersioning/request.json new file mode 100644 index 0000000..426e032 --- /dev/null +++ b/connector/testdata/01-setup/mutation/02-enableStorageBucketVersioning/request.json @@ -0,0 +1,12 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "enableStorageBucketVersioning", + "arguments": { + "bucket": "minio-bucket-test" + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/02-setStorageBucketLifecycle/expected.json b/connector/testdata/01-setup/mutation/02-setStorageBucketLifecycle/expected.json new file mode 100644 index 0000000..e28c88a --- /dev/null +++ b/connector/testdata/01-setup/mutation/02-setStorageBucketLifecycle/expected.json @@ -0,0 +1,8 @@ +{ + "operation_results": [ + { + "result": true, + "type": "procedure" + } + ] +} \ No newline at end of file diff 
--git a/connector/testdata/01-setup/mutation/02-setStorageBucketLifecycle/request.json b/connector/testdata/01-setup/mutation/02-setStorageBucketLifecycle/request.json new file mode 100644 index 0000000..f0090f7 --- /dev/null +++ b/connector/testdata/01-setup/mutation/02-setStorageBucketLifecycle/request.json @@ -0,0 +1,62 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "setStorageBucketLifecycle", + "arguments": { + "bucket": "minio-bucket-test", + "rules": [ + { + "abortIncompleteMultipartUpload": { + "daysAfterInitiation": 1 + }, + "allVersionsExpiration": { + "days": 2, + "deleteMarker": false + }, + "expiration": { + "days": 4, + "expiredObjectAllVersions": false, + "expiredObjectDeleteMarker": false + }, + "filter": { + "and": { + "objectSizeGreaterThan": 1000000, + "objectSizeLessThan": 1, + "prefix": "JSqXP0pJZF", + "tags": [ + { + "key": "7Uw1or2jrq", + "value": "00sA2cRTCB" + } + ] + }, + "objectSizeGreaterThan": 1000000, + "objectSizeLessThan": 1, + "prefix": "30LomArtyT", + "tag": { + "key": "raZx9yPMwi", + "value": "JbBh0wCDaQ" + } + }, + "id": "aXD6eFi5JE", + "noncurrentVersionExpiration": { + "newerNoncurrentVersions": 108, + "noncurrentDays": 12 + }, + "noncurrentVersionTransition": { + "newerNoncurrentVersions": 155, + "noncurrentDays": 7 + }, + "prefix": "GPJel0xa2s", + "status": "Enabled", + "transition": { + "days": 5583 + } + } + ] + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/02-setStorageBucketNotification/expected.json b/connector/testdata/01-setup/mutation/02-setStorageBucketNotification/expected.json new file mode 100644 index 0000000..777dfc1 --- /dev/null +++ b/connector/testdata/01-setup/mutation/02-setStorageBucketNotification/expected.json @@ -0,0 +1 @@ +{ "operation_results": [{ "result": true, "type": "procedure" }] } diff --git a/connector/testdata/01-setup/mutation/02-setStorageBucketNotification/request.json b/connector/testdata/01-setup/mutation/02-setStorageBucketNotification/request.json new file mode 100644 index 0000000..19e96ba --- /dev/null +++ b/connector/testdata/01-setup/mutation/02-setStorageBucketNotification/request.json @@ -0,0 +1,13 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "setStorageBucketNotification", + "arguments": { + "bucket": "minio-bucket-test", + "queueConfigurations": [] + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/02-setStorageBucketTags/expected.json b/connector/testdata/01-setup/mutation/02-setStorageBucketTags/expected.json new file mode 100644 index 0000000..777dfc1 --- /dev/null +++ b/connector/testdata/01-setup/mutation/02-setStorageBucketTags/expected.json @@ -0,0 +1 @@ +{ "operation_results": [{ "result": true, "type": "procedure" }] } diff --git a/connector/testdata/01-setup/mutation/02-setStorageBucketTags/request.json b/connector/testdata/01-setup/mutation/02-setStorageBucketTags/request.json new file mode 100644 index 0000000..8687212 --- /dev/null +++ b/connector/testdata/01-setup/mutation/02-setStorageBucketTags/request.json @@ -0,0 +1,15 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "setStorageBucketTags", + "arguments": { + "bucket": "minio-bucket-test", + "tags": { + "Foo": "bar" + } + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/03-uploadStorageObject/expected.json b/connector/testdata/01-setup/mutation/03-uploadStorageObject/expected.json new file mode 100644 index 0000000..712ee25 --- /dev/null +++ 
b/connector/testdata/01-setup/mutation/03-uploadStorageObject/expected.json @@ -0,0 +1,22 @@ +{ + "operation_results": [ + { + "result": { + "bucket": "minio-bucket-test", + "checksumCrc32": null, + "checksumCrc32C": null, + "checksumCrc64Nvme": null, + "checksumSha1": null, + "checksumSha256": "rUiPKiW98se3IJ8h0sXCX33z81tncK1sF4FXb09vrZM=", + "etag": "299d2d913ceaa8377d22244db39723ca", + "expiration": null, + "expirationRuleId": null, + "lastModified": null, + "location": null, + "name": "I81XNHrIsl", + "size": 10 + }, + "type": "procedure" + } + ] +} diff --git a/connector/testdata/01-setup/mutation/03-uploadStorageObject/request.json b/connector/testdata/01-setup/mutation/03-uploadStorageObject/request.json new file mode 100644 index 0000000..c695c65 --- /dev/null +++ b/connector/testdata/01-setup/mutation/03-uploadStorageObject/request.json @@ -0,0 +1,96 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "uploadStorageObject", + "arguments": { + "bucket": "minio-bucket-test", + "data": "SVpHWjhMMHZzVw==", + "object": "I81XNHrIsl", + "options": { + "autoChecksum": "SHA256", + "cacheControl": "max-age=180, public", + "checksum": "SHA256", + "clientId": "s3", + "concurrentStreamParts": false, + "contentDisposition": "attachment", + "contentEncoding": "gzip", + "contentLanguage": "en-US", + "contentType": "text/plain", + "disableContentSha256": false, + "disableMultipart": false, + "expires": "2099-01-01T00:00:00Z", + "numThreads": 1, + "partSize": 3573915715657741285, + "sendContentMd5": false, + "storageClass": "STANDARD", + "userMetadata": { + "Foo": "Bar" + }, + "userTags": { + "UserID": "1" + }, + "websiteRedirectLocation": "http://localhost:9001" + } + }, + "fields": { + "fields": { + "bucket": { + "column": "bucket", + "type": "column" + }, + "checksumCrc32": { + "column": "checksumCrc32", + "type": "column" + }, + "checksumCrc32C": { + "column": "checksumCrc32C", + "type": "column" + }, + "checksumCrc64Nvme": { + "column": "checksumCrc64Nvme", + "type": "column" + }, + "checksumSha1": { + "column": "checksumSha1", + "type": "column" + }, + "checksumSha256": { + "column": "checksumSha256", + "type": "column" + }, + "etag": { + "column": "etag", + "type": "column" + }, + "expiration": { + "column": "expiration", + "type": "column" + }, + "expirationRuleId": { + "column": "expirationRuleId", + "type": "column" + }, + "lastModified": { + "column": "lastModified", + "type": "column" + }, + "location": { + "column": "location", + "type": "column" + }, + "name": { + "column": "name", + "type": "column" + }, + "size": { + "column": "size", + "type": "column" + } + }, + "type": "object" + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/03-uploadStorageObjectText/expected.json b/connector/testdata/01-setup/mutation/03-uploadStorageObjectText/expected.json new file mode 100644 index 0000000..893798b --- /dev/null +++ b/connector/testdata/01-setup/mutation/03-uploadStorageObjectText/expected.json @@ -0,0 +1,22 @@ +{ + "operation_results": [ + { + "result": { + "bucket": "minio-bucket-lock", + "checksumCrc32": "5qWdvg==", + "checksumCrc32C": null, + "checksumCrc64Nvme": null, + "checksumSha1": null, + "checksumSha256": null, + "etag": "eaa542050b6be94b64f717af19071987", + "expiration": null, + "expirationRuleId": null, + "lastModified": null, + "location": null, + "name": "SiKBc6ifDC", + "size": 16 + }, + "type": "procedure" + } + ] +} diff --git a/connector/testdata/01-setup/mutation/03-uploadStorageObjectText/request.json 
b/connector/testdata/01-setup/mutation/03-uploadStorageObjectText/request.json new file mode 100644 index 0000000..a34bd32 --- /dev/null +++ b/connector/testdata/01-setup/mutation/03-uploadStorageObjectText/request.json @@ -0,0 +1,99 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "uploadStorageObjectText", + "arguments": { + "bucket": "minio-bucket-lock", + "data": "SGVsbG8gd29ybGQK", + "object": "SiKBc6ifDC", + "options": { + "autoChecksum": "CRC32", + "cacheControl": "max-age=100", + "checksum": "CRC32", + "clientId": "s3", + "concurrentStreamParts": false, + "contentDisposition": "attachment", + "contentEncoding": "gzip", + "contentLanguage": "en-US", + "contentType": "text/plain", + "disableContentSha256": false, + "disableMultipart": false, + "expires": "2099-01-01T00:00:00Z", + "legalHold": "ON", + "mode": "GOVERNANCE", + "retainUntilDate": "2099-01-01T00:00:02Z", + "numThreads": 2, + "partSize": 35739157156577, + "sendContentMd5": false, + "storageClass": "STANDARD", + "userMetadata": { + "Foo": "Baz" + }, + "userTags": { + "UserID": "2" + }, + "websiteRedirectLocation": "http://localhost:9001" + } + }, + "fields": { + "fields": { + "bucket": { + "column": "bucket", + "type": "column" + }, + "checksumCrc32": { + "column": "checksumCrc32", + "type": "column" + }, + "checksumCrc32C": { + "column": "checksumCrc32C", + "type": "column" + }, + "checksumCrc64Nvme": { + "column": "checksumCrc64Nvme", + "type": "column" + }, + "checksumSha1": { + "column": "checksumSha1", + "type": "column" + }, + "checksumSha256": { + "column": "checksumSha256", + "type": "column" + }, + "etag": { + "column": "etag", + "type": "column" + }, + "expiration": { + "column": "expiration", + "type": "column" + }, + "expirationRuleId": { + "column": "expirationRuleId", + "type": "column" + }, + "lastModified": { + "column": "lastModified", + "type": "column" + }, + "location": { + "column": "location", + "type": "column" + }, + "name": { + "column": "name", + "type": "column" + }, + "size": { + "column": "size", + "type": "column" + } + }, + "type": "object" + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/04-composeStorageObject/expected.json b/connector/testdata/01-setup/mutation/04-composeStorageObject/expected.json new file mode 100644 index 0000000..8798ea2 --- /dev/null +++ b/connector/testdata/01-setup/mutation/04-composeStorageObject/expected.json @@ -0,0 +1,23 @@ +{ + "operation_results": [ + { + "result": { + "bucket": "minio-bucket-test", + "checksumCrc32": null, + "checksumCrc32C": null, + "checksumCrc64Nvme": null, + "checksumSha1": null, + "checksumSha256": null, + "etag": "64372db0af1ea1279271d7d4dcedbe3d-1", + "expiration": null, + "expirationRuleId": null, + "expires": null, + "lastModified": null, + "location": "http://localhost:9000/minio-bucket-test/a9cSGZGzM2", + "name": "a9cSGZGzM2", + "size": 10 + }, + "type": "procedure" + } + ] +} diff --git a/connector/testdata/01-setup/mutation/04-composeStorageObject/request.json b/connector/testdata/01-setup/mutation/04-composeStorageObject/request.json new file mode 100644 index 0000000..9a56545 --- /dev/null +++ b/connector/testdata/01-setup/mutation/04-composeStorageObject/request.json @@ -0,0 +1,88 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "composeStorageObject", + "arguments": { + "dest": { + "bucket": "minio-bucket-test", + "object": "a9cSGZGzM2", + "replaceMetadata": false, + "replaceTags": false, + "size": 352 + }, + "sources": 
[ + { + "bucket": "minio-bucket-test", + "end": 34, + "noMatchETag": "ic0R0GYP3Z", + "object": "I81XNHrIsl", + "start": 0 + } + ] + }, + "fields": { + "fields": { + "bucket": { + "column": "bucket", + "type": "column" + }, + "checksumCrc32": { + "column": "checksumCrc32", + "type": "column" + }, + "checksumCrc32C": { + "column": "checksumCrc32C", + "type": "column" + }, + "checksumCrc64Nvme": { + "column": "checksumCrc64Nvme", + "type": "column" + }, + "checksumSha1": { + "column": "checksumSha1", + "type": "column" + }, + "checksumSha256": { + "column": "checksumSha256", + "type": "column" + }, + "etag": { + "column": "etag", + "type": "column" + }, + "expiration": { + "column": "expiration", + "type": "column" + }, + "expirationRuleId": { + "column": "expirationRuleId", + "type": "column" + }, + "expires": { + "column": "expires", + "type": "column" + }, + "lastModified": { + "column": "lastModified", + "type": "column" + }, + "location": { + "column": "location", + "type": "column" + }, + "name": { + "column": "name", + "type": "column" + }, + "size": { + "column": "size", + "type": "column" + } + }, + "type": "object" + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/04-copyStorageObject/expected.json b/connector/testdata/01-setup/mutation/04-copyStorageObject/expected.json new file mode 100644 index 0000000..29c6715 --- /dev/null +++ b/connector/testdata/01-setup/mutation/04-copyStorageObject/expected.json @@ -0,0 +1,22 @@ +{ + "operation_results": [ + { + "result": { + "bucket": "minio-bucket-test", + "checksumCrc32": null, + "checksumCrc32C": null, + "checksumCrc64Nvme": null, + "checksumSha1": null, + "checksumSha256": null, + "etag": "299d2d913ceaa8377d22244db39723ca", + "expiration": null, + "expirationRuleId": null, + "expires": null, + "location": null, + "name": "I81XNHrIsl-2", + "size": 0 + }, + "type": "procedure" + } + ] +} diff --git a/connector/testdata/01-setup/mutation/04-copyStorageObject/request.json b/connector/testdata/01-setup/mutation/04-copyStorageObject/request.json new file mode 100644 index 0000000..0047873 --- /dev/null +++ b/connector/testdata/01-setup/mutation/04-copyStorageObject/request.json @@ -0,0 +1,91 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "copyStorageObject", + "arguments": { + "dest": { + "bucket": "minio-bucket-test", + "object": "I81XNHrIsl-2", + "replaceMetadata": false, + "replaceTags": true, + "size": 8502136105187820186, + "userMetadata": { + "UserID": "2" + }, + "userTags": { + "Copy": "true" + } + }, + "source": { + "bucket": "minio-bucket-test", + "end": 1730102302880743319, + "matchModifiedSince": "1970-01-01T00:00:01Z", + "matchRange": true, + "matchUnmodifiedSince": "2099-01-01T00:00:01Z", + "noMatchETag": "ic0R0GYP3Z", + "object": "I81XNHrIsl", + "start": 1 + } + }, + "fields": { + "fields": { + "bucket": { + "column": "bucket", + "type": "column" + }, + "checksumCrc32": { + "column": "checksumCrc32", + "type": "column" + }, + "checksumCrc32C": { + "column": "checksumCrc32C", + "type": "column" + }, + "checksumCrc64Nvme": { + "column": "checksumCrc64Nvme", + "type": "column" + }, + "checksumSha1": { + "column": "checksumSha1", + "type": "column" + }, + "checksumSha256": { + "column": "checksumSha256", + "type": "column" + }, + "etag": { + "column": "etag", + "type": "column" + }, + "expiration": { + "column": "expiration", + "type": "column" + }, + "expirationRuleId": { + "column": "expirationRuleId", + "type": "column" + }, + "expires": { + "column": "expires", + 
"type": "column" + }, + "location": { + "column": "location", + "type": "column" + }, + "name": { + "column": "name", + "type": "column" + }, + "size": { + "column": "size", + "type": "column" + } + }, + "type": "object" + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/05-putStorageObjectLegalHold/expected.json b/connector/testdata/01-setup/mutation/05-putStorageObjectLegalHold/expected.json new file mode 100644 index 0000000..e28c88a --- /dev/null +++ b/connector/testdata/01-setup/mutation/05-putStorageObjectLegalHold/expected.json @@ -0,0 +1,8 @@ +{ + "operation_results": [ + { + "result": true, + "type": "procedure" + } + ] +} \ No newline at end of file diff --git a/connector/testdata/01-setup/mutation/05-putStorageObjectLegalHold/request.json b/connector/testdata/01-setup/mutation/05-putStorageObjectLegalHold/request.json new file mode 100644 index 0000000..3408ac3 --- /dev/null +++ b/connector/testdata/01-setup/mutation/05-putStorageObjectLegalHold/request.json @@ -0,0 +1,14 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "putStorageObjectLegalHold", + "arguments": { + "bucket": "minio-bucket-lock", + "object": "SiKBc6ifDC", + "status": "ON" + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/06-putStorageObjectRetention/expected.json b/connector/testdata/01-setup/mutation/06-putStorageObjectRetention/expected.json new file mode 100644 index 0000000..e28c88a --- /dev/null +++ b/connector/testdata/01-setup/mutation/06-putStorageObjectRetention/expected.json @@ -0,0 +1,8 @@ +{ + "operation_results": [ + { + "result": true, + "type": "procedure" + } + ] +} \ No newline at end of file diff --git a/connector/testdata/01-setup/mutation/06-putStorageObjectRetention/request.json b/connector/testdata/01-setup/mutation/06-putStorageObjectRetention/request.json new file mode 100644 index 0000000..37b91b5 --- /dev/null +++ b/connector/testdata/01-setup/mutation/06-putStorageObjectRetention/request.json @@ -0,0 +1,16 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "putStorageObjectRetention", + "arguments": { + "bucket": "minio-bucket-lock", + "governanceBypass": true, + "mode": "COMPLIANCE", + "object": "SiKBc6ifDC", + "retainUntilDate": "2099-01-01T00:00:00Z" + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/07-putStorageObjectTags/expected.json b/connector/testdata/01-setup/mutation/07-putStorageObjectTags/expected.json new file mode 100644 index 0000000..777dfc1 --- /dev/null +++ b/connector/testdata/01-setup/mutation/07-putStorageObjectTags/expected.json @@ -0,0 +1 @@ +{ "operation_results": [{ "result": true, "type": "procedure" }] } diff --git a/connector/testdata/01-setup/mutation/07-putStorageObjectTags/request.json b/connector/testdata/01-setup/mutation/07-putStorageObjectTags/request.json new file mode 100644 index 0000000..0620d4e --- /dev/null +++ b/connector/testdata/01-setup/mutation/07-putStorageObjectTags/request.json @@ -0,0 +1,17 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "putStorageObjectTags", + "arguments": { + "bucket": "minio-bucket-test", + "object": "I81XNHrIsl-2", + "tags": { + "Foo": "baz", + "UserID": "3" + } + } + } + ] +} diff --git a/connector/testdata/01-setup/mutation/08-setStorageObjectLockConfig/expected.json b/connector/testdata/01-setup/mutation/08-setStorageObjectLockConfig/expected.json new file mode 100644 index 0000000..e28c88a --- /dev/null +++ 
b/connector/testdata/01-setup/mutation/08-setStorageObjectLockConfig/expected.json @@ -0,0 +1,8 @@ +{ + "operation_results": [ + { + "result": true, + "type": "procedure" + } + ] +} \ No newline at end of file diff --git a/connector/testdata/01-setup/mutation/08-setStorageObjectLockConfig/request.json b/connector/testdata/01-setup/mutation/08-setStorageObjectLockConfig/request.json new file mode 100644 index 0000000..c705d68 --- /dev/null +++ b/connector/testdata/01-setup/mutation/08-setStorageObjectLockConfig/request.json @@ -0,0 +1,15 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "setStorageObjectLockConfig", + "arguments": { + "bucket": "minio-bucket-lock", + "mode": "GOVERNANCE", + "unit": "DAYS", + "validity": 1 + } + } + ] +} diff --git a/connector/testdata/02-get/query/downloadStorageObject/expected.json b/connector/testdata/02-get/query/downloadStorageObject/expected.json new file mode 100644 index 0000000..2a658ff --- /dev/null +++ b/connector/testdata/02-get/query/downloadStorageObject/expected.json @@ -0,0 +1 @@ +[{ "rows": [{ "__value": "SVpHWjhMMHZzVw==" }] }] diff --git a/connector/testdata/02-get/query/downloadStorageObject/request.json b/connector/testdata/02-get/query/downloadStorageObject/request.json new file mode 100644 index 0000000..cafedb2 --- /dev/null +++ b/connector/testdata/02-get/query/downloadStorageObject/request.json @@ -0,0 +1,42 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + }, + "checksum": { + "type": "literal", + "value": true + }, + "headers": { + "type": "literal", + "value": null + }, + "object": { + "type": "literal", + "value": "I81XNHrIsl" + }, + "partNumber": { + "type": "literal", + "value": 1 + }, + "requestParams": { + "type": "literal", + "value": null + }, + "versionId": { + "type": "literal", + "value": null + } + }, + "collection": "downloadStorageObject", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/downloadStorageObjectText/expected.json b/connector/testdata/02-get/query/downloadStorageObjectText/expected.json new file mode 100644 index 0000000..1923f9d --- /dev/null +++ b/connector/testdata/02-get/query/downloadStorageObjectText/expected.json @@ -0,0 +1 @@ +[{ "rows": [{ "__value": "IZGZ8L0vsW" }] }] diff --git a/connector/testdata/02-get/query/downloadStorageObjectText/request.json b/connector/testdata/02-get/query/downloadStorageObjectText/request.json new file mode 100644 index 0000000..5606d79 --- /dev/null +++ b/connector/testdata/02-get/query/downloadStorageObjectText/request.json @@ -0,0 +1,42 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + }, + "checksum": { + "type": "literal", + "value": true + }, + "headers": { + "type": "literal", + "value": null + }, + "object": { + "type": "literal", + "value": "I81XNHrIsl" + }, + "partNumber": { + "type": "literal", + "value": 1 + }, + "requestParams": { + "type": "literal", + "value": null + }, + "versionId": { + "type": "literal", + "value": null + } + }, + "collection": "downloadStorageObjectText", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storageBucketExists/expected.json b/connector/testdata/02-get/query/storageBucketExists/expected.json new file mode 100644 index 0000000..69aafbe --- 
/dev/null +++ b/connector/testdata/02-get/query/storageBucketExists/expected.json @@ -0,0 +1,9 @@ +[ + { + "rows": [ + { + "__value": true + } + ] + } +] diff --git a/connector/testdata/02-get/query/storageBucketExists/request.json b/connector/testdata/02-get/query/storageBucketExists/request.json new file mode 100644 index 0000000..e6b7636 --- /dev/null +++ b/connector/testdata/02-get/query/storageBucketExists/request.json @@ -0,0 +1,22 @@ +{ + "arguments": { + "clientId": { + "type": "literal", + "value": "minio" + }, + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + } + }, + "collection": "storageBucketExists", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storageBucketLifecycle/expected.json b/connector/testdata/02-get/query/storageBucketLifecycle/expected.json new file mode 100644 index 0000000..98b9664 --- /dev/null +++ b/connector/testdata/02-get/query/storageBucketLifecycle/expected.json @@ -0,0 +1,44 @@ +[ + { + "rows": [ + { + "__value": { + "rules": [ + { + "abortIncompleteMultipartUpload": null, + "allVersionsExpiration": null, + "delMarkerExpiration": null, + "expiration": { + "date": null, + "days": 4, + "expiredObjectAllVersions": null, + "expiredObjectDeleteMarker": false + }, + "filter": { + "and": { + "objectSizeGreaterThan": 1000000, + "objectSizeLessThan": 1, + "prefix": "JSqXP0pJZF", + "tags": [{ "key": "7Uw1or2jrq", "value": "00sA2cRTCB" }] + }, + "objectSizeGreaterThan": null, + "objectSizeLessThan": null, + "prefix": null, + "tag": null + }, + "id": "aXD6eFi5JE", + "noncurrentVersionExpiration": { + "newerNoncurrentVersions": 108, + "noncurrentDays": 12 + }, + "noncurrentVersionTransition": null, + "prefix": "GPJel0xa2s", + "status": "Enabled", + "transition": null + } + ] + } + } + ] + } +] diff --git a/connector/testdata/02-get/query/storageBucketLifecycle/request.json b/connector/testdata/02-get/query/storageBucketLifecycle/request.json new file mode 100644 index 0000000..3287e85 --- /dev/null +++ b/connector/testdata/02-get/query/storageBucketLifecycle/request.json @@ -0,0 +1,253 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + } + }, + "collection": "storageBucketLifecycle", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "fields": { + "fields": { + "rules": { + "column": "rules", + "fields": { + "fields": { + "fields": { + "abortIncompleteMultipartUpload": { + "column": "abortIncompleteMultipartUpload", + "fields": { + "fields": { + "daysAfterInitiation": { + "column": "daysAfterInitiation", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "allVersionsExpiration": { + "column": "allVersionsExpiration", + "fields": { + "fields": { + "days": { + "column": "days", + "type": "column" + }, + "deleteMarker": { + "column": "deleteMarker", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "delMarkerExpiration": { + "column": "delMarkerExpiration", + "fields": { + "fields": { + "days": { + "column": "days", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "expiration": { + "column": "expiration", + "fields": { + "fields": { + "date": { + "column": "date", + "type": "column" + }, + "days": { + "column": "days", + "type": "column" + }, + "expiredObjectAllVersions": { + "column": "expiredObjectAllVersions", + "type": "column" + }, + 
"expiredObjectDeleteMarker": { + "column": "expiredObjectDeleteMarker", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "filter": { + "column": "filter", + "fields": { + "fields": { + "and": { + "column": "and", + "fields": { + "fields": { + "objectSizeGreaterThan": { + "column": "objectSizeGreaterThan", + "type": "column" + }, + "objectSizeLessThan": { + "column": "objectSizeLessThan", + "type": "column" + }, + "prefix": { + "column": "prefix", + "type": "column" + }, + "tags": { + "column": "tags", + "fields": { + "fields": { + "fields": { + "key": { + "column": "key", + "type": "column" + }, + "value": { + "column": "value", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "objectSizeGreaterThan": { + "column": "objectSizeGreaterThan", + "type": "column" + }, + "objectSizeLessThan": { + "column": "objectSizeLessThan", + "type": "column" + }, + "prefix": { + "column": "prefix", + "type": "column" + }, + "tag": { + "column": "tag", + "fields": { + "fields": { + "key": { + "column": "key", + "type": "column" + }, + "value": { + "column": "value", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "id": { + "column": "id", + "type": "column" + }, + "noncurrentVersionExpiration": { + "column": "noncurrentVersionExpiration", + "fields": { + "fields": { + "newerNoncurrentVersions": { + "column": "newerNoncurrentVersions", + "type": "column" + }, + "noncurrentDays": { + "column": "noncurrentDays", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "noncurrentVersionTransition": { + "column": "noncurrentVersionTransition", + "fields": { + "fields": { + "newerNoncurrentVersions": { + "column": "newerNoncurrentVersions", + "type": "column" + }, + "noncurrentDays": { + "column": "noncurrentDays", + "type": "column" + }, + "storageClass": { + "column": "storageClass", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "prefix": { + "column": "prefix", + "type": "column" + }, + "status": { + "column": "status", + "type": "column" + }, + "transition": { + "column": "transition", + "fields": { + "fields": { + "date": { + "column": "date", + "type": "column" + }, + "days": { + "column": "days", + "type": "column" + }, + "storageClass": { + "column": "storageClass", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storageBucketNotification/expected.json b/connector/testdata/02-get/query/storageBucketNotification/expected.json new file mode 100644 index 0000000..64f4651 --- /dev/null +++ b/connector/testdata/02-get/query/storageBucketNotification/expected.json @@ -0,0 +1,13 @@ +[ + { + "rows": [ + { + "__value": { + "cloudFunctionConfigurations": [], + "queueConfigurations": [], + "topicConfigurations": [] + } + } + ] + } +] diff --git a/connector/testdata/02-get/query/storageBucketNotification/request.json b/connector/testdata/02-get/query/storageBucketNotification/request.json new file mode 100644 index 0000000..ad9fa73 --- /dev/null +++ b/connector/testdata/02-get/query/storageBucketNotification/request.json @@ -0,0 +1,221 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + } + }, + "collection": 
"storageBucketNotification", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "fields": { + "fields": { + "cloudFunctionConfigurations": { + "column": "cloudFunctionConfigurations", + "fields": { + "fields": { + "fields": { + "arn": { + "column": "arn", + "type": "column" + }, + "cloudFunction": { + "column": "cloudFunction", + "type": "column" + }, + "event": { + "column": "event", + "type": "column" + }, + "filter": { + "column": "filter", + "fields": { + "fields": { + "s3Key": { + "column": "s3Key", + "fields": { + "fields": { + "filterRule": { + "column": "filterRule", + "fields": { + "fields": { + "fields": { + "name": { + "column": "name", + "type": "column" + }, + "value": { + "column": "value", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "id": { + "column": "id", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + }, + "queueConfigurations": { + "column": "queueConfigurations", + "fields": { + "fields": { + "fields": { + "arn": { + "column": "arn", + "type": "column" + }, + "event": { + "column": "event", + "type": "column" + }, + "filter": { + "column": "filter", + "fields": { + "fields": { + "s3Key": { + "column": "s3Key", + "fields": { + "fields": { + "filterRule": { + "column": "filterRule", + "fields": { + "fields": { + "fields": { + "name": { + "column": "name", + "type": "column" + }, + "value": { + "column": "value", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "id": { + "column": "id", + "type": "column" + }, + "queue": { + "column": "queue", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + }, + "topicConfigurations": { + "column": "topicConfigurations", + "fields": { + "fields": { + "fields": { + "arn": { + "column": "arn", + "type": "column" + }, + "event": { + "column": "event", + "type": "column" + }, + "filter": { + "column": "filter", + "fields": { + "fields": { + "s3Key": { + "column": "s3Key", + "fields": { + "fields": { + "filterRule": { + "column": "filterRule", + "fields": { + "fields": { + "fields": { + "name": { + "column": "name", + "type": "column" + }, + "value": { + "column": "value", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "id": { + "column": "id", + "type": "column" + }, + "topic": { + "column": "topic", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storageBucketPolicy/expected.json b/connector/testdata/02-get/query/storageBucketPolicy/expected.json new file mode 100644 index 0000000..896fcc9 --- /dev/null +++ b/connector/testdata/02-get/query/storageBucketPolicy/expected.json @@ -0,0 +1 @@ +[{ "rows": [{ "__value": "" }] }] diff --git a/connector/testdata/02-get/query/storageBucketPolicy/request.json b/connector/testdata/02-get/query/storageBucketPolicy/request.json new file mode 100644 index 0000000..61f71cc --- /dev/null +++ 
b/connector/testdata/02-get/query/storageBucketPolicy/request.json @@ -0,0 +1,18 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + } + }, + "collection": "storageBucketPolicy", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storageBucketReplication/expected.json b/connector/testdata/02-get/query/storageBucketReplication/expected.json new file mode 100644 index 0000000..7f1e655 --- /dev/null +++ b/connector/testdata/02-get/query/storageBucketReplication/expected.json @@ -0,0 +1 @@ +[{ "rows": [{ "__value": { "role": null, "rules": [] } }] }] diff --git a/connector/testdata/02-get/query/storageBucketReplication/request.json b/connector/testdata/02-get/query/storageBucketReplication/request.json new file mode 100644 index 0000000..a5bef28 --- /dev/null +++ b/connector/testdata/02-get/query/storageBucketReplication/request.json @@ -0,0 +1,192 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + } + }, + "collection": "storageBucketReplication", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "fields": { + "fields": { + "role": { + "column": "role", + "type": "column" + }, + "rules": { + "column": "rules", + "fields": { + "fields": { + "fields": { + "deleteMarkerReplication": { + "column": "deleteMarkerReplication", + "fields": { + "fields": { + "status": { + "column": "status", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "deleteReplication": { + "column": "deleteReplication", + "fields": { + "fields": { + "status": { + "column": "status", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "destination": { + "column": "destination", + "fields": { + "fields": { + "bucket": { + "column": "bucket", + "type": "column" + }, + "storageClass": { + "column": "storageClass", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "existingObjectReplication": { + "column": "existingObjectReplication", + "fields": { + "fields": { + "status": { + "column": "status", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "filter": { + "column": "filter", + "fields": { + "fields": { + "and": { + "column": "and", + "fields": { + "fields": { + "rrefix": { + "column": "rrefix", + "type": "column" + }, + "tag": { + "column": "tag", + "fields": { + "fields": { + "fields": { + "key": { + "column": "key", + "type": "column" + }, + "value": { + "column": "value", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "rrefix": { + "column": "rrefix", + "type": "column" + }, + "tag": { + "column": "tag", + "fields": { + "fields": { + "key": { + "column": "key", + "type": "column" + }, + "value": { + "column": "value", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "id": { + "column": "id", + "type": "column" + }, + "priority": { + "column": "priority", + "type": "column" + }, + "sourceSelectionCriteria": { + "column": "sourceSelectionCriteria", + "fields": { + "fields": { + "replicaModifications": { + "column": "replicaModifications", + "fields": { + "fields": { + "status": { + "column": "status", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + }, + 
"type": "object" + }, + "type": "column" + }, + "status": { + "column": "status", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storageBucketTags/expected.json b/connector/testdata/02-get/query/storageBucketTags/expected.json new file mode 100644 index 0000000..3e8e7e2 --- /dev/null +++ b/connector/testdata/02-get/query/storageBucketTags/expected.json @@ -0,0 +1 @@ +[{ "rows": [{ "__value": { "Foo": "bar" } }] }] diff --git a/connector/testdata/02-get/query/storageBucketTags/request.json b/connector/testdata/02-get/query/storageBucketTags/request.json new file mode 100644 index 0000000..33228cc --- /dev/null +++ b/connector/testdata/02-get/query/storageBucketTags/request.json @@ -0,0 +1,18 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + } + }, + "collection": "storageBucketTags", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storageBucketVersioning/expected.json b/connector/testdata/02-get/query/storageBucketVersioning/expected.json new file mode 100644 index 0000000..e9d6b6d --- /dev/null +++ b/connector/testdata/02-get/query/storageBucketVersioning/expected.json @@ -0,0 +1,14 @@ +[ + { + "rows": [ + { + "__value": { + "excludeFolders": false, + "excludedPrefixes": [], + "mfaDelete": null, + "status": "Enabled" + } + } + ] + } +] diff --git a/connector/testdata/02-get/query/storageBucketVersioning/request.json b/connector/testdata/02-get/query/storageBucketVersioning/request.json new file mode 100644 index 0000000..374b365 --- /dev/null +++ b/connector/testdata/02-get/query/storageBucketVersioning/request.json @@ -0,0 +1,39 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + } + }, + "collection": "storageBucketVersioning", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "fields": { + "fields": { + "excludeFolders": { + "column": "excludeFolders", + "type": "column" + }, + "excludedPrefixes": { + "column": "excludedPrefixes", + "type": "column" + }, + "mfaDelete": { + "column": "mfaDelete", + "type": "column" + }, + "status": { + "column": "status", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storageBuckets/expected.json b/connector/testdata/02-get/query/storageBuckets/expected.json new file mode 100644 index 0000000..48b5932 --- /dev/null +++ b/connector/testdata/02-get/query/storageBuckets/expected.json @@ -0,0 +1,12 @@ +[ + { + "rows": [ + { + "__value": [ + { "name": "minio-bucket-lock" }, + { "name": "minio-bucket-test" } + ] + } + ] + } +] diff --git a/connector/testdata/02-get/query/storageBuckets/request.json b/connector/testdata/02-get/query/storageBuckets/request.json new file mode 100644 index 0000000..7e84e91 --- /dev/null +++ b/connector/testdata/02-get/query/storageBuckets/request.json @@ -0,0 +1,25 @@ +{ + "arguments": {}, + "collection": "storageBuckets", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "fields": { + "fields": { + "fields": { + "name": { + "column": "name", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + } + } + } +} diff --git 
a/connector/testdata/02-get/query/storageIncompleteUploads/expected.json b/connector/testdata/02-get/query/storageIncompleteUploads/expected.json new file mode 100644 index 0000000..3123905 --- /dev/null +++ b/connector/testdata/02-get/query/storageIncompleteUploads/expected.json @@ -0,0 +1 @@ +[{ "rows": [{ "__value": [] }] }] diff --git a/connector/testdata/02-get/query/storageIncompleteUploads/request.json b/connector/testdata/02-get/query/storageIncompleteUploads/request.json new file mode 100644 index 0000000..76285ab --- /dev/null +++ b/connector/testdata/02-get/query/storageIncompleteUploads/request.json @@ -0,0 +1,54 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + }, + "prefix": { + "type": "literal", + "value": "" + }, + "recursive": { + "type": "literal", + "value": true + } + }, + "collection": "storageIncompleteUploads", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "fields": { + "fields": { + "fields": { + "initiated": { + "column": "initiated", + "type": "column" + }, + "key": { + "column": "key", + "type": "column" + }, + "size": { + "column": "size", + "type": "column" + }, + "storageClass": { + "column": "storageClass", + "type": "column" + }, + "uploadId": { + "column": "uploadId", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storageObject/expected.json b/connector/testdata/02-get/query/storageObject/expected.json new file mode 100644 index 0000000..1718e49 --- /dev/null +++ b/connector/testdata/02-get/query/storageObject/expected.json @@ -0,0 +1,44 @@ +[ + { + "rows": [ + { + "__value": { + "bucket": "minio-bucket-test", + "checksumCrc32": null, + "checksumCrc32C": null, + "checksumCrc64Nvme": null, + "checksumSha1": null, + "checksumSha256": "rUiPKiW98se3IJ8h0sXCX33z81tncK1sF4FXb09vrZM=", + "clientId": null, + "contentType": "text/plain", + "etag": "299d2d913ceaa8377d22244db39723ca", + "expiration": null, + "expirationRuleId": null, + "expires": "2099-01-01T00:00:00Z", + "grant": [], + "isDeleteMarker": false, + "isLatest": false, + "metadata": { + "Cache-Control": ["max-age=180, public"], + "Content-Disposition": ["attachment"], + "Content-Encoding": ["gzip"], + "Content-Language": ["en-US"], + "Content-Type": ["text/plain"], + "X-Amz-Meta-Foo": ["Bar"], + "X-Amz-Tagging-Count": ["1"] + }, + "name": "I81XNHrIsl", + "owner": null, + "replicationReady": false, + "replicationStatus": null, + "restore": null, + "size": 10, + "storageClass": null, + "userMetadata": { "Foo": "Bar" }, + "userTagCount": 1, + "userTags": {} + } + } + ] + } +] diff --git a/connector/testdata/02-get/query/storageObject/request.json b/connector/testdata/02-get/query/storageObject/request.json new file mode 100644 index 0000000..ab8530c --- /dev/null +++ b/connector/testdata/02-get/query/storageObject/request.json @@ -0,0 +1,194 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + }, + "checksum": { + "type": "literal", + "value": true + }, + "object": { + "type": "literal", + "value": "I81XNHrIsl" + } + }, + "collection": "storageObject", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "fields": { + "fields": { + "bucket": { + "column": "bucket", + "type": "column" + }, + "checksumCrc32": { + "column": "checksumCrc32", + "type": "column" + }, + "checksumCrc32C": { + "column": "checksumCrc32C", + "type": 
"column" + }, + "checksumCrc64Nvme": { + "column": "checksumCrc64Nvme", + "type": "column" + }, + "checksumSha1": { + "column": "checksumSha1", + "type": "column" + }, + "checksumSha256": { + "column": "checksumSha256", + "type": "column" + }, + "clientId": { + "column": "clientId", + "type": "column" + }, + "contentType": { + "column": "contentType", + "type": "column" + }, + "etag": { + "column": "etag", + "type": "column" + }, + "expiration": { + "column": "expiration", + "type": "column" + }, + "expirationRuleId": { + "column": "expirationRuleId", + "type": "column" + }, + "expires": { + "column": "expires", + "type": "column" + }, + "grant": { + "column": "grant", + "fields": { + "fields": { + "fields": { + "grantee": { + "column": "grantee", + "fields": { + "fields": { + "displayName": { + "column": "displayName", + "type": "column" + }, + "id": { + "column": "id", + "type": "column" + }, + "uri": { + "column": "uri", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "permission": { + "column": "permission", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + }, + "isDeleteMarker": { + "column": "isDeleteMarker", + "type": "column" + }, + "isLatest": { + "column": "isLatest", + "type": "column" + }, + "metadata": { + "column": "metadata", + "type": "column" + }, + "name": { + "column": "name", + "type": "column" + }, + "owner": { + "column": "owner", + "fields": { + "fields": { + "id": { + "column": "id", + "type": "column" + }, + "name": { + "column": "name", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "replicationReady": { + "column": "replicationReady", + "type": "column" + }, + "replicationStatus": { + "column": "replicationStatus", + "type": "column" + }, + "restore": { + "column": "restore", + "fields": { + "fields": { + "expiryTime": { + "column": "expiryTime", + "type": "column" + }, + "ongoingRestore": { + "column": "ongoingRestore", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "size": { + "column": "size", + "type": "column" + }, + "storageClass": { + "column": "storageClass", + "type": "column" + }, + "userMetadata": { + "column": "userMetadata", + "type": "column" + }, + "userTagCount": { + "column": "userTagCount", + "type": "column" + }, + "userTags": { + "column": "userTags", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storageObjectAttributes/expected.json b/connector/testdata/02-get/query/storageObjectAttributes/expected.json new file mode 100644 index 0000000..3b21d44 --- /dev/null +++ b/connector/testdata/02-get/query/storageObjectAttributes/expected.json @@ -0,0 +1,28 @@ +[ + { + "rows": [ + { + "__value": { + "checksum": { + "checksumCrc32": null, + "checksumCrc32C": null, + "checksumCrc64Nvme": null, + "checksumSha1": null, + "checksumSha256": "rUiPKiW98se3IJ8h0sXCX33z81tncK1sF4FXb09vrZM=" + }, + "etag": "299d2d913ceaa8377d22244db39723ca", + "objectParts": { + "isTruncated": true, + "maxParts": 2, + "nextPartNumberMarker": 0, + "partNumberMarker": 2, + "parts": [], + "partsCount": 1 + }, + "objectSize": 10, + "storageClass": "STANDARD" + } + } + ] + } +] diff --git a/connector/testdata/02-get/query/storageObjectAttributes/request.json b/connector/testdata/02-get/query/storageObjectAttributes/request.json new file mode 100644 index 0000000..e7ea8d8 --- /dev/null +++ b/connector/testdata/02-get/query/storageObjectAttributes/request.json @@ 
-0,0 +1,149 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + }, + "maxParts": { + "type": "literal", + "value": 2 + }, + "object": { + "type": "literal", + "value": "I81XNHrIsl" + }, + "partNumberMarker": { + "type": "literal", + "value": 2 + }, + "versionId": { + "type": "literal", + "value": null + } + }, + "collection": "storageObjectAttributes", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "fields": { + "fields": { + "checksum": { + "column": "checksum", + "fields": { + "fields": { + "checksumCrc32": { + "column": "checksumCrc32", + "type": "column" + }, + "checksumCrc32C": { + "column": "checksumCrc32C", + "type": "column" + }, + "checksumCrc64Nvme": { + "column": "checksumCrc64Nvme", + "type": "column" + }, + "checksumSha1": { + "column": "checksumSha1", + "type": "column" + }, + "checksumSha256": { + "column": "checksumSha256", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "etag": { + "column": "etag", + "type": "column" + }, + "objectParts": { + "column": "objectParts", + "fields": { + "fields": { + "isTruncated": { + "column": "isTruncated", + "type": "column" + }, + "maxParts": { + "column": "maxParts", + "type": "column" + }, + "nextPartNumberMarker": { + "column": "nextPartNumberMarker", + "type": "column" + }, + "partNumberMarker": { + "column": "partNumberMarker", + "type": "column" + }, + "parts": { + "column": "parts", + "fields": { + "fields": { + "fields": { + "checksumCrc32": { + "column": "checksumCrc32", + "type": "column" + }, + "checksumCrc32C": { + "column": "checksumCrc32C", + "type": "column" + }, + "checksumCrc64Nvme": { + "column": "checksumCrc64Nvme", + "type": "column" + }, + "checksumSha1": { + "column": "checksumSha1", + "type": "column" + }, + "checksumSha256": { + "column": "checksumSha256", + "type": "column" + }, + "partNumber": { + "column": "partNumber", + "type": "column" + }, + "size": { + "column": "size", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "column" + }, + "partsCount": { + "column": "partsCount", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + }, + "objectSize": { + "column": "objectSize", + "type": "column" + }, + "storageClass": { + "column": "storageClass", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storageObjectLegalHold/expected.json b/connector/testdata/02-get/query/storageObjectLegalHold/expected.json new file mode 100644 index 0000000..3f670cd --- /dev/null +++ b/connector/testdata/02-get/query/storageObjectLegalHold/expected.json @@ -0,0 +1 @@ +[{ "rows": [{ "__value": "ON" }] }] diff --git a/connector/testdata/02-get/query/storageObjectLegalHold/request.json b/connector/testdata/02-get/query/storageObjectLegalHold/request.json new file mode 100644 index 0000000..9f6ed94 --- /dev/null +++ b/connector/testdata/02-get/query/storageObjectLegalHold/request.json @@ -0,0 +1,22 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-lock" + }, + "object": { + "type": "literal", + "value": "SiKBc6ifDC" + } + }, + "collection": "storageObjectLegalHold", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storageObjectLockConfig/expected.json 
b/connector/testdata/02-get/query/storageObjectLockConfig/expected.json new file mode 100644 index 0000000..64e7baf --- /dev/null +++ b/connector/testdata/02-get/query/storageObjectLockConfig/expected.json @@ -0,0 +1,14 @@ +[ + { + "rows": [ + { + "__value": { + "mode": "GOVERNANCE", + "objectLock": "Enabled", + "unit": "DAYS", + "validity": 1 + } + } + ] + } +] diff --git a/connector/testdata/02-get/query/storageObjectLockConfig/request.json b/connector/testdata/02-get/query/storageObjectLockConfig/request.json new file mode 100644 index 0000000..eea15a8 --- /dev/null +++ b/connector/testdata/02-get/query/storageObjectLockConfig/request.json @@ -0,0 +1,39 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-lock" + } + }, + "collection": "storageObjectLockConfig", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "fields": { + "fields": { + "mode": { + "column": "mode", + "type": "column" + }, + "objectLock": { + "column": "objectLock", + "type": "column" + }, + "unit": { + "column": "unit", + "type": "column" + }, + "validity": { + "column": "validity", + "type": "column" + } + }, + "type": "object" + }, + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storageObjectTags/expected.json b/connector/testdata/02-get/query/storageObjectTags/expected.json new file mode 100644 index 0000000..d9d5bd1 --- /dev/null +++ b/connector/testdata/02-get/query/storageObjectTags/expected.json @@ -0,0 +1 @@ +[{ "rows": [{ "__value": { "UserID": "1" } }] }] diff --git a/connector/testdata/02-get/query/storageObjectTags/request.json b/connector/testdata/02-get/query/storageObjectTags/request.json new file mode 100644 index 0000000..debb6ee --- /dev/null +++ b/connector/testdata/02-get/query/storageObjectTags/request.json @@ -0,0 +1,22 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + }, + "object": { + "type": "literal", + "value": "I81XNHrIsl" + } + }, + "collection": "storageObjectTags", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "type": "column" + } + } + } +} diff --git a/connector/testdata/02-get/query/storagePresignedDownloadUrl/request.json b/connector/testdata/02-get/query/storagePresignedDownloadUrl/request.json new file mode 100644 index 0000000..90804bd --- /dev/null +++ b/connector/testdata/02-get/query/storagePresignedDownloadUrl/request.json @@ -0,0 +1,43 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + }, + "expiry": { + "type": "literal", + "value": "1h" + }, + "object": { + "type": "literal", + "value": "I81XNHrIsl" + }, + "requestParams": { + "type": "literal", + "value": null + } + }, + "collection": "storagePresignedDownloadUrl", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "type": "column", + "fields": { + "fields": { + "url": { + "column": "url", + "type": "column" + }, + "expiredAt": { + "column": "expiredAt", + "type": "column" + } + }, + "type": "object" + } + } + } + } +} diff --git a/connector/testdata/02-get/query/storagePresignedHeadUrl/request.json b/connector/testdata/02-get/query/storagePresignedHeadUrl/request.json new file mode 100644 index 0000000..040a0b4 --- /dev/null +++ b/connector/testdata/02-get/query/storagePresignedHeadUrl/request.json @@ -0,0 +1,47 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + }, + "clientId": { + 
"type": "literal", + "value": "minio" + }, + "expiry": { + "type": "literal", + "value": "1h" + }, + "object": { + "type": "literal", + "value": "I81XNHrIsl" + }, + "requestParams": { + "type": "literal", + "value": null + } + }, + "collection": "storagePresignedHeadUrl", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "type": "column", + "fields": { + "fields": { + "url": { + "column": "url", + "type": "column" + }, + "expiredAt": { + "column": "expiredAt", + "type": "column" + } + }, + "type": "object" + } + } + } + } +} diff --git a/connector/testdata/02-get/query/storagePresignedUploadUrl/request.json b/connector/testdata/02-get/query/storagePresignedUploadUrl/request.json new file mode 100644 index 0000000..01cc624 --- /dev/null +++ b/connector/testdata/02-get/query/storagePresignedUploadUrl/request.json @@ -0,0 +1,39 @@ +{ + "arguments": { + "bucket": { + "type": "literal", + "value": "minio-bucket-test" + }, + "expiry": { + "type": "literal", + "value": "1h" + }, + "object": { + "type": "literal", + "value": "8UFsTfY9bI" + } + }, + "collection": "storagePresignedUploadUrl", + "collection_relationships": {}, + "query": { + "fields": { + "__value": { + "column": "__value", + "type": "column", + "fields": { + "fields": { + "url": { + "column": "url", + "type": "column" + }, + "expiredAt": { + "column": "expiredAt", + "type": "column" + } + }, + "type": "object" + } + } + } + } +} diff --git a/connector/testdata/03-cleanup/mutation/01-removeIncompleteStorageUpload/expected.json b/connector/testdata/03-cleanup/mutation/01-removeIncompleteStorageUpload/expected.json new file mode 100644 index 0000000..e28c88a --- /dev/null +++ b/connector/testdata/03-cleanup/mutation/01-removeIncompleteStorageUpload/expected.json @@ -0,0 +1,8 @@ +{ + "operation_results": [ + { + "result": true, + "type": "procedure" + } + ] +} \ No newline at end of file diff --git a/connector/testdata/03-cleanup/mutation/01-removeIncompleteStorageUpload/request.json b/connector/testdata/03-cleanup/mutation/01-removeIncompleteStorageUpload/request.json new file mode 100644 index 0000000..ec886aa --- /dev/null +++ b/connector/testdata/03-cleanup/mutation/01-removeIncompleteStorageUpload/request.json @@ -0,0 +1,13 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "removeIncompleteStorageUpload", + "arguments": { + "bucket": "minio-bucket-test", + "object": "I81XNHrIsl-2" + } + } + ] +} diff --git a/connector/testdata/03-cleanup/mutation/02-removeStorageObject/expected.json b/connector/testdata/03-cleanup/mutation/02-removeStorageObject/expected.json new file mode 100644 index 0000000..777dfc1 --- /dev/null +++ b/connector/testdata/03-cleanup/mutation/02-removeStorageObject/expected.json @@ -0,0 +1 @@ +{ "operation_results": [{ "result": true, "type": "procedure" }] } diff --git a/connector/testdata/03-cleanup/mutation/02-removeStorageObject/request.json b/connector/testdata/03-cleanup/mutation/02-removeStorageObject/request.json new file mode 100644 index 0000000..9caff82 --- /dev/null +++ b/connector/testdata/03-cleanup/mutation/02-removeStorageObject/request.json @@ -0,0 +1,15 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "removeStorageObject", + "arguments": { + "bucket": "minio-bucket-test", + "forceDelete": true, + "governanceBypass": true, + "object": "I81XNHrIsl-2" + } + } + ] +} diff --git a/connector/testdata/03-cleanup/mutation/03-removeStorageObjectTags/expected.json 
b/connector/testdata/03-cleanup/mutation/03-removeStorageObjectTags/expected.json new file mode 100644 index 0000000..e28c88a --- /dev/null +++ b/connector/testdata/03-cleanup/mutation/03-removeStorageObjectTags/expected.json @@ -0,0 +1,8 @@ +{ + "operation_results": [ + { + "result": true, + "type": "procedure" + } + ] +} \ No newline at end of file diff --git a/connector/testdata/03-cleanup/mutation/03-removeStorageObjectTags/request.json b/connector/testdata/03-cleanup/mutation/03-removeStorageObjectTags/request.json new file mode 100644 index 0000000..d044538 --- /dev/null +++ b/connector/testdata/03-cleanup/mutation/03-removeStorageObjectTags/request.json @@ -0,0 +1,13 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "removeStorageObjectTags", + "arguments": { + "bucket": "minio-bucket-test", + "object": "a9cSGZGzM2" + } + } + ] +} diff --git a/connector/testdata/03-cleanup/mutation/04-removeStorageObjects/expected.json b/connector/testdata/03-cleanup/mutation/04-removeStorageObjects/expected.json new file mode 100644 index 0000000..9e77ae4 --- /dev/null +++ b/connector/testdata/03-cleanup/mutation/04-removeStorageObjects/expected.json @@ -0,0 +1 @@ +{ "operation_results": [{ "result": [], "type": "procedure" }] } diff --git a/connector/testdata/03-cleanup/mutation/04-removeStorageObjects/request.json b/connector/testdata/03-cleanup/mutation/04-removeStorageObjects/request.json new file mode 100644 index 0000000..df2fbf4 --- /dev/null +++ b/connector/testdata/03-cleanup/mutation/04-removeStorageObjects/request.json @@ -0,0 +1,38 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "removeStorageObjects", + "arguments": { + "bucket": "minio-bucket-test", + "governanceBypass": false, + "maxKeys": 385252679, + "prefix": "", + "recursive": true, + "withMetadata": true, + "withVersions": true + }, + "fields": { + "fields": { + "fields": { + "error": { + "column": "error", + "type": "column" + }, + "objectName": { + "column": "objectName", + "type": "column" + }, + "versionId": { + "column": "versionId", + "type": "column" + } + }, + "type": "object" + }, + "type": "array" + } + } + ] +} diff --git a/connector/testdata/03-cleanup/mutation/05-suspendStorageBucketVersioning/expected.json b/connector/testdata/03-cleanup/mutation/05-suspendStorageBucketVersioning/expected.json new file mode 100644 index 0000000..e28c88a --- /dev/null +++ b/connector/testdata/03-cleanup/mutation/05-suspendStorageBucketVersioning/expected.json @@ -0,0 +1,8 @@ +{ + "operation_results": [ + { + "result": true, + "type": "procedure" + } + ] +} \ No newline at end of file diff --git a/connector/testdata/03-cleanup/mutation/05-suspendStorageBucketVersioning/request.json b/connector/testdata/03-cleanup/mutation/05-suspendStorageBucketVersioning/request.json new file mode 100644 index 0000000..ed8ae2f --- /dev/null +++ b/connector/testdata/03-cleanup/mutation/05-suspendStorageBucketVersioning/request.json @@ -0,0 +1,12 @@ +{ + "collection_relationships": {}, + "operations": [ + { + "type": "procedure", + "name": "suspendStorageBucketVersioning", + "arguments": { + "bucket": "minio-bucket-test" + } + } + ] +} diff --git a/connector/testdata/03-cleanup/mutation/06-removeStorageBucketTags/expected.json b/connector/testdata/03-cleanup/mutation/06-removeStorageBucketTags/expected.json new file mode 100644 index 0000000..e28c88a --- /dev/null +++ b/connector/testdata/03-cleanup/mutation/06-removeStorageBucketTags/expected.json @@ -0,0 +1,8 
@@
+{
+  "operation_results": [
+    {
+      "result": true,
+      "type": "procedure"
+    }
+  ]
+}
\ No newline at end of file
diff --git a/connector/testdata/03-cleanup/mutation/06-removeStorageBucketTags/request.json b/connector/testdata/03-cleanup/mutation/06-removeStorageBucketTags/request.json
new file mode 100644
index 0000000..7981a39
--- /dev/null
+++ b/connector/testdata/03-cleanup/mutation/06-removeStorageBucketTags/request.json
@@ -0,0 +1,12 @@
+{
+  "collection_relationships": {},
+  "operations": [
+    {
+      "type": "procedure",
+      "name": "removeStorageBucketTags",
+      "arguments": {
+        "bucket": "minio-bucket-test"
+      }
+    }
+  ]
+}
diff --git a/connector/testdata/03-cleanup/mutation/07-removeStorageBucket/expected.json b/connector/testdata/03-cleanup/mutation/07-removeStorageBucket/expected.json
new file mode 100644
index 0000000..e62ec74
--- /dev/null
+++ b/connector/testdata/03-cleanup/mutation/07-removeStorageBucket/expected.json
@@ -0,0 +1,8 @@
+{
+  "operation_results": [
+    {
+      "result": true,
+      "type": "procedure"
+    }
+  ]
+}
diff --git a/connector/testdata/03-cleanup/mutation/07-removeStorageBucket/request.json b/connector/testdata/03-cleanup/mutation/07-removeStorageBucket/request.json
new file mode 100644
index 0000000..772e6a7
--- /dev/null
+++ b/connector/testdata/03-cleanup/mutation/07-removeStorageBucket/request.json
@@ -0,0 +1,12 @@
+{
+  "collection_relationships": {},
+  "operations": [
+    {
+      "type": "procedure",
+      "name": "removeStorageBucket",
+      "arguments": {
+        "bucket": "minio-bucket-test"
+      }
+    }
+  ]
+}
diff --git a/connector/types/configuration.go b/connector/types/configuration.go
new file mode 100644
index 0000000..9dd5621
--- /dev/null
+++ b/connector/types/configuration.go
@@ -0,0 +1,43 @@
+package types
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/hasura/ndc-storage/connector/storage"
+)
+
+const (
+	ConfigurationFileName = "configuration.yaml"
+)
+
+// Configuration contains required settings for the connector.
+type Configuration struct {
+	// List of storage client configurations and credentials
+	Clients []storage.ClientConfig `json:"clients" yaml:"clients"`
+	// Settings for concurrent query and mutation executions
+	Concurrency ConcurrencySettings `json:"concurrency,omitempty" yaml:"concurrency,omitempty"`
+}
+
+// Validate checks if the configuration is valid.
+func (c Configuration) Validate() error {
+	if len(c.Clients) == 0 {
+		return errors.New("require at least 1 element in the clients array")
+	}
+
+	for i, c := range c.Clients {
+		if err := c.Validate(); err != nil {
+			return fmt.Errorf("invalid client configuration at %d: %w", i, err)
+		}
+	}
+
+	return nil
+}
+
+// ConcurrencySettings represents settings for concurrent query and mutation executions.
+type ConcurrencySettings struct {
+	// Maximum number of concurrent executions if there are many query variables.
+	Query int `json:"query" jsonschema:"min=1,default=10" yaml:"query"`
+	// Maximum number of concurrent executions if there are many mutation operations.
+	Mutation int `json:"mutation" jsonschema:"min=1,default=1" yaml:"mutation"`
+}
diff --git a/connector/types/connector.go b/connector/types/connector.go
new file mode 100644
index 0000000..6e120a2
--- /dev/null
+++ b/connector/types/connector.go
@@ -0,0 +1,14 @@
+package types
+
+import (
+	"github.com/hasura/ndc-sdk-go/connector"
+	"github.com/hasura/ndc-storage/connector/storage"
+)
+
+// State is the global state which is shared for every connector request.
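+// It bundles the SDK telemetry state with the storage manager so request
+// handlers can access every configured storage client.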
+type State struct {
+	*connector.TelemetryState
+	Storage *storage.Manager
+}
diff --git a/docs/configuration.md b/docs/configuration.md
new file mode 100644
index 0000000..36799f8
--- /dev/null
+++ b/docs/configuration.md
@@ -0,0 +1,112 @@
+# Configuration
+
+## Clients
+
+### General Settings
+
+The configuration file `configuration.yaml` contains a list of storage clients. Every client has common settings:
+
+- `id`: the unique identity name of the client. This setting is optional unless there are many configured clients.
+- `type`: type of the storage provider. Accepts one of `s3`, `gs`.
+- `defaultBucket`: the default bucket name.
+- `authentication`: the authentication setting.
+- `endpoint`: the base endpoint of the storage server. Required for S3-compatible services such as MinIO, Cloudflare R2, DigitalOcean Spaces, etc.
+- `publicHost`: the public host to use for presigned URL generation when the connector communicates with the storage server through a private, internal DNS. If this setting isn't set, the host of the generated URL will be the private DNS name, which isn't accessible from the internet.
+- `region`: (optional) region of the bucket to be created.
+- `maxRetries`: maximum number of retries. Defaults to 10.
+- `defaultPresignedExpiry`: the default expiry for presigned URL generation, in duration format. The maximum expiry is 7 days \(`168h`\) and the minimum is 1 second \(`1s`\).
+- `trailingHeaders`: indicates whether the server supports trailing headers. Only supported for v4 signatures.
+- `allowedBuckets`: the list of allowed bucket names. This setting prevents users from accessing buckets and objects outside the list; however, it's still recommended to restrict the permissions of the IAM credentials. It also lets the connector know which buckets belong to this client. An empty value means all buckets are allowed; the storage server will handle the validation.
+
+### Authentication
+
+#### Static Credentials
+
+Configure the authentication type `static` with `accessKeyId` and `secretAccessKey`. `sessionToken` is also supported for [temporary access](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html), but for testing only.
+
+```yaml
+clients:
+  - type: s3
+    authentication:
+      type: static
+      accessKeyId:
+        env: ACCESS_KEY_ID
+      secretAccessKey:
+        env: SECRET_ACCESS_KEY
+```
+
+#### IAM
+
+The IAM authentication retrieves credentials from the AWS EC2, ECS, or EKS service, and keeps track of whether those credentials have expired. This authentication method can be used only if the connector is hosted in the AWS ecosystem.
+
+The following settings are supported:
+
+- `iamAuthEndpoint`: the optional custom endpoint to fetch IAM role credentials. The client can identify the endpoint automatically if not set.
+
+### Examples
+
+#### AWS S3
+
+Create [a user access key](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-keys-admin-managed.html) with S3 permissions to configure the Access Key ID and Secret Access Key.
+
+```yaml
+clients:
+  - id: s3
+    type: s3
+    defaultBucket:
+      env: DEFAULT_BUCKET
+    authentication:
+      type: static
+      accessKeyId:
+        env: ACCESS_KEY_ID
+      secretAccessKey:
+        env: SECRET_ACCESS_KEY
+```
+
+#### Google Cloud Storage
+
+You need to [generate an HMAC key](https://cloud.google.com/storage/docs/authentication/hmackeys) to configure the Access Key ID and Secret Access Key.
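+
+With the HMAC key in place, the client entry mirrors the S3 example; only the client `type` changes to `gs`: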
+
+```yaml
+clients:
+  - id: gs
+    type: gs
+    defaultBucket:
+      env: DEFAULT_BUCKET
+    authentication:
+      type: static
+      accessKeyId:
+        env: ACCESS_KEY_ID
+      secretAccessKey:
+        env: SECRET_ACCESS_KEY
+```
+
+#### Other S3-compatible services
+
+You must configure the endpoint URL along with the Access Key ID and Secret Access Key.
+
+```yaml
+clients:
+  - id: minio
+    type: s3
+    endpoint:
+      env: STORAGE_ENDPOINT
+    defaultBucket:
+      env: DEFAULT_BUCKET
+    authentication:
+      type: static
+      accessKeyId:
+        env: ACCESS_KEY_ID
+      secretAccessKey:
+        env: SECRET_ACCESS_KEY
+```
+
+#### Cloudflare R2
+
+You must configure the endpoint URL along with the [Access Key ID and Secret Access Key](https://developers.cloudflare.com/r2/api/s3/tokens/#get-s3-api-credentials-from-an-api-token). See [Cloudflare docs](https://developers.cloudflare.com/r2/api/s3/api/) for more context.
+
+#### DigitalOcean Spaces
+
+See [Spaces API Reference Documentation](https://docs.digitalocean.com/reference/api/spaces-api/).
diff --git a/docs/upload-download.md b/docs/upload-download.md
new file mode 100644
index 0000000..55c6c76
--- /dev/null
+++ b/docs/upload-download.md
@@ -0,0 +1,119 @@
+# Upload / Download Objects
+
+## Upload Objects
+
+You can upload object files directly by encoding the file content as a base64 string, or generate a presigned URL and let the client upload the file to that URL. Presigned URLs are recommended, especially for large files, because GraphQL doesn't support file streaming: you would have to encode the entire file as a string, which consumes a lot of memory.
+
+### Generate presigned URL (recommended)
+
+Input the object path and the expiry of the presigned URL (the expiry is optional if the `defaultPresignedExpiry` setting is configured).
+
+```gql
+query PresignedUploadUrl {
+  storagePresignedUploadUrl(object: "hello.txt", expiry: "1h") {
+    url
+    expiredAt
+  }
+}
+```
+
+### Direct Upload
+
+The object data must be encoded as a base64 string.
+
+```gql
+mutation UploadObject {
+  uploadStorageObject(object: "hello.txt", data: "SGVsbG8gd29ybGQK") {
+    bucket
+    name
+    size
+    etag
+  }
+}
+```
+
+### Upload Text Objects
+
+Use the `uploadStorageObjectText` mutation if you are confident that the object content is plain text. The request payload is roughly 25% smaller than the equivalent base64-encoded payload, since base64 encoding adds about 33% overhead.
+
+```gql
+mutation UploadObjectText {
+  uploadStorageObjectText(object: "hello2.txt", data: "Hello World") {
+    bucket
+    name
+    size
+    etag
+  }
+}
+```
+
+## Download Objects
+
+Similar to uploads, you can download object files directly as a base64-encoded string or generate a presigned URL. Presigned URLs are also recommended, to avoid excessive memory usage.
+
+### Generate presigned URL (recommended)
+
+Input the object path and the expiry of the presigned URL (the expiry is optional if the `defaultPresignedExpiry` setting is configured).
+
+```gql
+query GetSignedDownloadURL {
+  storagePresignedDownloadUrl(object: "hello.txt", expiry: "1h") {
+    url
+    expiredAt
+  }
+}
+```
+
+### Direct Download
+
+The response is a base64-encoded string. The client must decode the string to get the raw content.
+
+```gql
+query DownloadObject {
+  downloadStorageObject(object: "hello.txt")
+}
+
+# {
+#   "data": {
+#     "downloadStorageObject": "SGVsbG8gd29ybGQK"
+#   }
+# }
+```
+
+### Download Text Objects
+
+Use the `downloadStorageObjectText` query if you are confident that the object content is plain text.
+ +```gql +query DownloadObjectText { + downloadStorageObjectText(object: "hello.txt") +} + +# { +# "data": { +# "downloadStorageObjectText": "Hello world\n" +# } +# } +``` + +## Multiple clients and buckets + +You can upload to other buckets or services by specifying `clientId` and `bucket` arguments. + +```gql +mutation UploadObject { + uploadStorageObject( + clientId: "gs" + bucket: "other-bucket" + object: "hello.txt" + data: "SGVsbG8gd29ybGQK" + ) { + bucket + name + size + etag + checksumSha256 + lastModified + } +} +``` diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..0b9e7a7 --- /dev/null +++ b/go.mod @@ -0,0 +1,68 @@ +module github.com/hasura/ndc-storage + +go 1.23 + +require ( + github.com/alecthomas/kong v1.6.0 + github.com/hasura/ndc-sdk-go v1.7.0 + github.com/invopop/jsonschema v0.13.0 + github.com/lmittmann/tint v1.0.6 + github.com/minio/minio-go/v7 v7.0.82 + go.opentelemetry.io/otel v1.29.0 + go.opentelemetry.io/otel/trace v1.29.0 + golang.org/x/sync v0.10.0 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/bahlo/generic-list-go v0.2.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/buger/jsonparser v1.1.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/goccy/go-json v0.10.3 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/cpuid/v2 v2.2.8 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/minio/md5-simd v1.1.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.61.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rs/xid v1.6.0 // indirect + github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect + go.opentelemetry.io/contrib/bridges/otelslog v0.4.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.29.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.5.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.5.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.51.0 // indirect + go.opentelemetry.io/otel/log v0.5.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/sdk v1.29.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.5.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/sys v0.28.0 // indirect + 
golang.org/x/text v0.21.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.35.2 // indirect + gotest.tools/v3 v3.5.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..01c9509 --- /dev/null +++ b/go.sum @@ -0,0 +1,147 @@ +github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= +github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/kong v1.6.0 h1:mwOzbdMR7uv2vul9J0FU3GYxE7ls/iX1ieMg5WIM6gE= +github.com/alecthomas/kong v1.6.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU= +github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= +github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/hasura/ndc-sdk-go v1.7.0 h1:AWYERM3aRvFgqXKNr3qa1fuohkSvXH0RFsBKP+kdpbI= +github.com/hasura/ndc-sdk-go v1.7.0/go.mod h1:79hEYe4HuPwFKi6aWjUDo9l/hhCiYFVEykjJsF1abg0= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= +github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lmittmann/tint v1.0.6 h1:vkkuDAZXc0EFGNzYjWcV0h7eEX+uujH48f/ifSkJWgc= +github.com/lmittmann/tint v1.0.6/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= +github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= +github.com/minio/minio-go/v7 v7.0.82 h1:tWfICLhmp2aFPXL8Tli0XDTHj2VB/fNf0PC1f/i1gRo= +github.com/minio/minio-go/v7 v7.0.82/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= +go.opentelemetry.io/contrib/bridges/otelslog v0.4.0 h1:i66F95zqmrf3EyN5gu0E2pjTvCRZo/p8XIYidG3vOP8= +go.opentelemetry.io/contrib/bridges/otelslog v0.4.0/go.mod h1:JuCiVizZ6ovLZLnYk1nGRUEAnmRJLKGh5v8DmwiKlhY= +go.opentelemetry.io/contrib/propagators/b3 v1.29.0 h1:hNjyoRsAACnhoOLWupItUjABzeYmX3GTTZLzwJluJlk= +go.opentelemetry.io/contrib/propagators/b3 v1.29.0/go.mod h1:E76MTitU1Niwo5NSN+mVxkyLu4h4h7Dp/yh38F2WuIU= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.5.0 h1:iWyFL+atC9S1e6MFDLNUZieyKTmsrvsDzuozUDbFg8E= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.5.0/go.mod h1:0Ur7rPCJmkHksYcBywsFXnKBG3pqGl4TGltZ+T3qhSA= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.5.0 h1:4d++HQ+Ihdl+53zSjtsCUFDmNMju2FC9qFkUlTxPLqo= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.5.0/go.mod h1:mQX5dTO3Mh5ZF7bPKDkt5c/7C41u/SiDr9XgTpzXXn8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 h1:k6fQVDQexDE+3jG2SfCQjnHS7OamcP73YMoxEVq5B6k= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0/go.mod h1:t4BrYLHU450Zo9fnydWlIuswB1bm7rM8havDpWOJeDo= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0 h1:xvhQxJ/C9+RTnAj5DpTg7LSM1vbbMTiXt7e9hsfqHNw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0/go.mod h1:Fcvs2Bz1jkDM+Wf5/ozBGmi3tQ/c9zPKLnsipnfhGAo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc= +go.opentelemetry.io/otel/exporters/prometheus v0.51.0 h1:G7uexXb/K3T+T9fNLCCKncweEtNEBMTO+46hKX5EdKw= +go.opentelemetry.io/otel/exporters/prometheus v0.51.0/go.mod h1:v0mFe5Kk7woIh938mrZBJBmENYquyA0IICrlYm4Y0t4= +go.opentelemetry.io/otel/log v0.5.0 h1:x1Pr6Y3gnXgl1iFBwtGy1W/mnzENoK0w0ZoaeOI3i30= +go.opentelemetry.io/otel/log v0.5.0/go.mod h1:NU/ozXeGuOR5/mjCRXYbTC00NFJ3NYuraV/7O78F0rE= +go.opentelemetry.io/otel/metric v1.29.0 
h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk/log v0.5.0 h1:A+9lSjlZGxkQOr7QSBJcuyyYBw79CufQ69saiJLey7o= +go.opentelemetry.io/otel/sdk/log v0.5.0/go.mod h1:zjxIW7sw1IHolZL2KlSAtrUi8JHttoeiQy43Yl3WuVQ= +go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod 
h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= diff --git a/jsonschema/configuration.schema.json b/jsonschema/configuration.schema.json new file mode 100644 index 0000000..6f4e51b --- /dev/null +++ b/jsonschema/configuration.schema.json @@ -0,0 +1,199 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://github.com/hasura/ndc-storage/connector/types/configuration", + "$ref": "#/$defs/Configuration", + "$defs": { + "AuthCredentials": { + "oneOf": [ + { + "properties": { + "type": { + "type": "string", + "enum": [ + "static" + ] + }, + "accessKeyId": { + "$ref": "#/$defs/EnvString" + }, + "secretAccessKey": { + "$ref": "#/$defs/EnvString" + }, + "sessionToken": { + "$ref": "#/$defs/EnvString" + } + }, + "type": "object", + "required": [ + "type", + "accessKeyId", + "secretAccessKey" + ] + }, + { + "properties": { + "type": { + "type": "string", + "enum": [ + "iam" + ] + }, + "iamAuthEndpoint": { + "$ref": "#/$defs/EnvString" + } + }, + "type": "object", + "required": [ + "type" + ] + } + ] + }, + "ClientConfig": { + "properties": { + "id": { + "type": "string" + }, + "type": { + "$ref": "#/$defs/EnvStorageProviderType" + }, + "defaultBucket": { + "$ref": "#/$defs/EnvString" + }, + "endpoint": { + "$ref": "#/$defs/EnvString" + }, + "publicHost": { + "$ref": "#/$defs/EnvString" + }, + "region": { + "oneOf": [ + { + "$ref": "#/$defs/EnvString" + }, + { + "type": "null" + } + ] + }, + "maxRetries": { + "type": "integer", + "default": 10 + }, + "defaultPresignedExpiry": { + "type": "string", + "pattern": "[0-9]+(s|m|h)", + "default": "24h" + }, + "authentication": { + "$ref": "#/$defs/AuthCredentials" + }, + "trailingHeaders": { + "type": "boolean" + }, + "allowedBuckets": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "type", + "defaultBucket", + "authentication" + ] + }, + "ConcurrencySettings": { + "properties": { + "query": { + "type": "integer", + "default": 10 + }, + "mutation": { + "type": "integer", + "default": 1 + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "query", + "mutation" + ] + }, + "Configuration": { + "properties": { + "clients": { + "items": { + "$ref": "#/$defs/ClientConfig" + }, + "type": "array" + }, + "concurrency": { + "$ref": "#/$defs/ConcurrencySettings" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "clients" + ] + }, + "EnvStorageProviderType": { + "anyOf": [ + { + "required": [ + "value" + ] + }, + { + "required": [ + "env" + ] + } + ], + "properties": { + "env": { + "type": "string" + }, + "value": { + "type": "string", + "enum": [ + "s3", + "gs" + ] + } + }, + "type": "object" + }, + "EnvString": { + "anyOf": [ + { + "required": [ + "value" + ], + "title": "value" + }, + { + "required": [ + "env" + ], + "title": "env" + } + ], + "properties": { + "value": { + "type": "string" + }, + "env": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + } + } +} \ No newline at end of file diff --git a/jsonschema/generator.go b/jsonschema/generator.go new file mode 100644 index 0000000..8a7513c --- /dev/null +++ b/jsonschema/generator.go @@ -0,0 +1,40 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/hasura/ndc-storage/connector/types" + "github.com/invopop/jsonschema" +) + +func main() { + if err := jsonSchemaConfiguration(); err != nil { + panic(fmt.Errorf("failed to write jsonschema for Configuration: %w", err)) + } +} + +func 
jsonSchemaConfiguration() error {
+	r := new(jsonschema.Reflector)
+	if err := r.AddGoComments("github.com/hasura/ndc-storage/connector/types", "../connector/types"); err != nil {
+		return err
+	}
+
+	if err := r.AddGoComments("github.com/hasura/ndc-storage/connector/storage", "../connector/storage"); err != nil {
+		return err
+	}
+
+	if err := r.AddGoComments("github.com/hasura/ndc-storage/connector/storage/common", "../connector/storage/common"); err != nil {
+		return err
+	}
+
+	reflectSchema := r.Reflect(&types.Configuration{})
+
+	schemaBytes, err := json.MarshalIndent(reflectSchema, "", "  ")
+	if err != nil {
+		return err
+	}
+
+	return os.WriteFile("configuration.schema.json", schemaBytes, 0o644)
+}
diff --git a/scripts/build-manifest.sh b/scripts/build-manifest.sh
new file mode 100755
index 0000000..0a7ebc4
--- /dev/null
+++ b/scripts/build-manifest.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+set -evo pipefail
+
+REF=$(git rev-parse --short HEAD)
+VERSION=${VERSION:-$REF}
+BUILD_DIR=/tmp/ndc-storage
+ROOT="$(pwd)"
+
+rm -rf $BUILD_DIR
+mkdir -p $BUILD_DIR
+
+cp -r connector-definition $BUILD_DIR
+sed -i "s/{{VERSION}}/$VERSION/g" $BUILD_DIR/connector-definition/.hasura-connector/connector-metadata.yaml
+
+mkdir -p "${ROOT}/release"
+tar -czvf "${ROOT}/release/connector-definition.tgz" --directory $BUILD_DIR/connector-definition .
+echo "checksum of connector-definition.tgz:"
+sha256sum "${ROOT}/release/connector-definition.tgz"
\ No newline at end of file
diff --git a/scripts/test.sh b/scripts/test.sh
new file mode 100755
index 0000000..93abd6e
--- /dev/null
+++ b/scripts/test.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+set -o pipefail
+
+# Colors used by http_wait's status output.
+GREEN='\033[0;32m'
+RED='\033[0;31m'
+NC='\033[0m'
+
+trap 'docker compose down -v' EXIT
+
+mkdir -p ./tmp
+
+if [ ! -f ./tmp/ndc-test ]; then
+  curl -L https://github.com/hasura/ndc-spec/releases/download/v0.1.6/ndc-test-x86_64-unknown-linux-gnu -o ./tmp/ndc-test
+  chmod +x ./tmp/ndc-test
+fi
+
+http_wait() {
+  printf "$1:\t "
+  for i in {1..120};
+  do
+    local code="$(curl -s -o /dev/null -m 2 -w '%{http_code}' $1)"
+    if [ "$code" != "200" ]; then
+      printf "."
+      sleep 1
+    else
+      printf "\r\033[K$1:\t ${GREEN}OK${NC}\n"
+      return 0
+    fi
+  done
+  printf "\n${RED}ERROR${NC}: cannot connect to $1.\n"
+  exit 1
+}
+
+docker compose up -d --build minio s3mock ndc-storage
+http_wait http://localhost:8080/health
+http_wait http://localhost:9000/minio/health/live
+
+./tmp/ndc-test test --endpoint http://localhost:8080
+
+# go tests
+go test -v -coverpkg=./connector/... -race -timeout 3m -coverprofile=coverage.out ./...
\ No newline at end of file
diff --git a/server/main.go b/server/main.go
new file mode 100644
index 0000000..f288500
--- /dev/null
+++ b/server/main.go
@@ -0,0 +1,28 @@
+package main
+
+import (
+	"github.com/hasura/ndc-sdk-go/connector"
+	"github.com/hasura/ndc-storage/configuration/version"
+	storage "github.com/hasura/ndc-storage/connector"
+)
+
+// Start the connector server at http://localhost:8080
+//
+//	go run . serve
+//
+// See [NDC Go SDK] for more information.
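+//
+// The options passed to Start set the Prometheus metrics prefix, the default
+// telemetry service name, and the build version reported by the connector.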
+// +// [NDC Go SDK]: https://github.com/hasura/ndc-sdk-go +func main() { + if err := connector.Start( + &storage.Connector{}, + connector.WithMetricsPrefix("ndc_storage"), + connector.WithDefaultServiceName("ndc-storage"), + connector.WithVersion(version.BuildVersion), + ); err != nil { + panic(err) + } +} diff --git a/tests/configuration/configuration.yaml b/tests/configuration/configuration.yaml new file mode 100644 index 0000000..7df94d6 --- /dev/null +++ b/tests/configuration/configuration.yaml @@ -0,0 +1,39 @@ +# yaml-language-server: $schema=../../jsonschema/configuration.schema.json +clients: + - id: minio + type: + value: s3 + endpoint: + env: STORAGE_ENDPOINT + publicHost: + env: PUBLIC_HOST + defaultBucket: + env: DEFAULT_BUCKET + trailingHeaders: true + authentication: + type: static + accessKeyId: + env: ACCESS_KEY_ID + secretAccessKey: + env: SECRET_ACCESS_KEY + - id: s3 + type: + value: s3 + endpoint: + env: S3_STORAGE_ENDPOINT + publicHost: + env: S3_PUBLIC_HOST + defaultBucket: + env: S3_DEFAULT_BUCKET + trailingHeaders: true + allowedBuckets: + - s3-bucket-test + authentication: + type: static + accessKeyId: + env: S3_ACCESS_KEY_ID + secretAccessKey: + env: S3_SECRET_ACCESS_KEY +concurrency: + query: 10 + mutation: 10 diff --git a/tests/engine/.gitattributes b/tests/engine/.gitattributes new file mode 100644 index 0000000..8ddc99f --- /dev/null +++ b/tests/engine/.gitattributes @@ -0,0 +1 @@ +*.hml linguist-language=yaml \ No newline at end of file diff --git a/tests/engine/.gitignore b/tests/engine/.gitignore new file mode 100644 index 0000000..c0c784f --- /dev/null +++ b/tests/engine/.gitignore @@ -0,0 +1,3 @@ +engine/build +/.env +/.env.* diff --git a/tests/engine/.hasura/context.yaml b/tests/engine/.hasura/context.yaml new file mode 100644 index 0000000..3822ed0 --- /dev/null +++ b/tests/engine/.hasura/context.yaml @@ -0,0 +1,14 @@ +kind: Context +version: v3 +definition: + current: default + contexts: + default: + supergraph: ../supergraph.yaml + subgraph: ../app/subgraph.yaml + localEnvFile: ../.env + scripts: + docker-start: + bash: HASURA_DDN_PAT=$(ddn auth print-pat) PROMPTQL_SECRET_KEY=$(ddn auth print-promptql-secret-key) docker compose -f compose.yaml --env-file .env up --build --pull always + powershell: $Env:HASURA_DDN_PAT = ddn auth print-pat; $Env:PROMPTQL_SECRET_KEY = ddn auth print-promptql-secret-key; docker compose -f compose.yaml --env-file .env up --build --pull always + promptQL: false diff --git a/tests/engine/app/metadata/ComposeStorageObject.hml b/tests/engine/app/metadata/ComposeStorageObject.hml new file mode 100644 index 0000000..6c03aaf --- /dev/null +++ b/tests/engine/app/metadata/ComposeStorageObject.hml @@ -0,0 +1,206 @@ +--- +kind: ObjectType +version: v1 +definition: + name: StorageCopyDestOptions + description: represents options specified by user for CopyObject/ComposeObject APIs. + fields: + - name: bucket + type: String + - name: legalHold + type: StorageLegalHoldStatus + - name: mode + type: StorageRetentionMode + - name: object + type: String! 
+ - name: replaceMetadata + type: Boolean + - name: replaceTags + type: Boolean + - name: retainUntilDate + type: TimestampTz + - name: size + type: Int64 + - name: userMetadata + type: Json + - name: userTags + type: Json + graphql: + typeName: StorageCopyDestOptions + inputTypeName: StorageCopyDestOptionsInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageCopyDestOptions + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageCopyDestOptions + permissions: + - role: admin + output: + allowedFields: + - bucket + - legalHold + - mode + - object + - replaceMetadata + - replaceTags + - retainUntilDate + - size + - userMetadata + - userTags + +--- +kind: ObjectType +version: v1 +definition: + name: StorageCopySrcOptions + description: represents a source object to be copied, using server-side copying APIs. + fields: + - name: bucket + type: String + - name: end + type: Int64 + - name: matchETag + type: String + - name: matchModifiedSince + type: TimestampTz + - name: matchRange + type: Boolean + - name: matchUnmodifiedSince + type: TimestampTz + - name: noMatchETag + type: String + - name: object + type: String! + - name: start + type: Int64 + - name: versionId + type: String + graphql: + typeName: StorageCopySrcOptions + inputTypeName: StorageCopySrcOptionsInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageCopySrcOptions + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageCopySrcOptions + permissions: + - role: admin + output: + allowedFields: + - bucket + - end + - matchETag + - matchModifiedSince + - matchRange + - matchUnmodifiedSince + - noMatchETag + - object + - start + - versionId + +--- +kind: ObjectType +version: v1 +definition: + name: StorageUploadInfo + description: represents the information of the uploaded object. + fields: + - name: bucket + type: String! + - name: checksumCrc32 + type: String + - name: checksumCrc32C + type: String + - name: checksumCrc64Nvme + type: String + - name: checksumSha1 + type: String + - name: checksumSha256 + type: String + - name: etag + type: String! + - name: expiration + type: TimestampTz + - name: expirationRuleId + type: String + - name: lastModified + type: TimestampTz + - name: location + type: String + - name: name + type: String! + - name: size + type: Int64! + - name: versionId + type: String + graphql: + typeName: StorageUploadInfo + inputTypeName: StorageUploadInfoInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageUploadInfo + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageUploadInfo + permissions: + - role: admin + output: + allowedFields: + - bucket + - checksumCrc32 + - checksumCrc32C + - checksumCrc64Nvme + - checksumSha1 + - checksumSha256 + - etag + - expiration + - expirationRuleId + - lastModified + - location + - name + - size + - versionId + +--- +kind: Command +version: v1 +definition: + name: ComposeStorageObject + outputType: StorageUploadInfo! + arguments: + - name: clientId + type: StorageClientId + - name: dest + type: StorageCopyDestOptions! + - name: sources + type: "[StorageCopySrcOptions!]!" + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: composeStorageObject + graphql: + rootFieldName: composeStorageObject + rootFieldKind: Mutation + description: creates an object by concatenating a list of source objects using + server-side copying. 
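+
+# Illustrative usage sketch (object names below are placeholders; the default
+# bucket and client are assumed):
+#
+#   mutation {
+#     composeStorageObject(
+#       dest: { object: "merged.txt" }
+#       sources: [{ object: "part-1.txt" }, { object: "part-2.txt" }]
+#     ) {
+#       name
+#       size
+#     }
+#   }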
+ +--- +kind: CommandPermissions +version: v1 +definition: + commandName: ComposeStorageObject + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/CopyStorageObject.hml b/tests/engine/app/metadata/CopyStorageObject.hml new file mode 100644 index 0000000..5128163 --- /dev/null +++ b/tests/engine/app/metadata/CopyStorageObject.hml @@ -0,0 +1,35 @@ +--- +kind: Command +version: v1 +definition: + name: CopyStorageObject + outputType: StorageUploadInfo! + arguments: + - name: clientId + type: StorageClientId + - name: dest + type: StorageCopyDestOptions! + - name: source + type: StorageCopySrcOptions! + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: copyStorageObject + graphql: + rootFieldName: copyStorageObject + rootFieldKind: Mutation + description: creates or replaces an object through server-side copying of an + existing object. It supports conditional copying, copying a part of an + object and server-side encryption of destination and decryption of source. + To copy multiple source objects into a single destination object see the + ComposeObject API. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: CopyStorageObject + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/CreateStorageBucket.hml b/tests/engine/app/metadata/CreateStorageBucket.hml new file mode 100644 index 0000000..5a26fa7 --- /dev/null +++ b/tests/engine/app/metadata/CreateStorageBucket.hml @@ -0,0 +1,33 @@ +--- +kind: Command +version: v1 +definition: + name: CreateStorageBucket + outputType: Boolean! + arguments: + - name: clientId + type: StorageClientId + - name: name + type: String! + - name: objectLocking + type: Boolean + - name: region + type: String + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: createStorageBucket + graphql: + rootFieldName: createStorageBucket + rootFieldKind: Mutation + description: creates a new bucket. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: CreateStorageBucket + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/DownloadStorageObject.hml b/tests/engine/app/metadata/DownloadStorageObject.hml new file mode 100644 index 0000000..371edac --- /dev/null +++ b/tests/engine/app/metadata/DownloadStorageObject.hml @@ -0,0 +1,42 @@ +--- +kind: Command +version: v1 +definition: + name: DownloadStorageObject + outputType: Bytes + arguments: + - name: bucket + type: String + - name: checksum + type: Boolean + - name: clientId + type: StorageClientId + - name: headers + type: Json + - name: object + type: String! + - name: partNumber + type: Int32 + - name: requestParams + type: Json + - name: versionId + type: String + source: + dataConnectorName: storage + dataConnectorCommand: + function: downloadStorageObject + graphql: + rootFieldName: downloadStorageObject + rootFieldKind: Query + description: returns a stream of the object data. Most of the common errors + occur when reading the stream. 
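+
+# Illustrative usage sketch (the object name is a placeholder; the default
+# bucket is assumed):
+#
+#   query {
+#     downloadStorageObject(object: "hello.txt")
+#   }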
+
+---
+kind: CommandPermissions
+version: v1
+definition:
+  commandName: DownloadStorageObject
+  permissions:
+    - role: admin
+      allowExecution: true
+
diff --git a/tests/engine/app/metadata/DownloadStorageObjectText.hml b/tests/engine/app/metadata/DownloadStorageObjectText.hml
new file mode 100644
index 0000000..0320e6f
--- /dev/null
+++ b/tests/engine/app/metadata/DownloadStorageObjectText.hml
@@ -0,0 +1,42 @@
+---
+kind: Command
+version: v1
+definition:
+  name: DownloadStorageObjectText
+  outputType: String
+  arguments:
+    - name: bucket
+      type: String
+    - name: checksum
+      type: Boolean
+    - name: clientId
+      type: StorageClientId
+    - name: headers
+      type: Json
+    - name: object
+      type: String!
+    - name: partNumber
+      type: Int32
+    - name: requestParams
+      type: Json
+    - name: versionId
+      type: String
+  source:
+    dataConnectorName: storage
+    dataConnectorCommand:
+      function: downloadStorageObjectText
+  graphql:
+    rootFieldName: downloadStorageObjectText
+    rootFieldKind: Query
+  description: returns the object content in plain text. Use this function only if
+    you know the object is a text file.
+
+---
+kind: CommandPermissions
+version: v1
+definition:
+  commandName: DownloadStorageObjectText
+  permissions:
+    - role: admin
+      allowExecution: true
+
diff --git a/tests/engine/app/metadata/EnableStorageBucketVersioning.hml b/tests/engine/app/metadata/EnableStorageBucketVersioning.hml
new file mode 100644
index 0000000..baffcd6
--- /dev/null
+++ b/tests/engine/app/metadata/EnableStorageBucketVersioning.hml
@@ -0,0 +1,29 @@
+---
+kind: Command
+version: v1
+definition:
+  name: EnableStorageBucketVersioning
+  outputType: Boolean!
+  arguments:
+    - name: bucket
+      type: String
+    - name: clientId
+      type: StorageClientId
+  source:
+    dataConnectorName: storage
+    dataConnectorCommand:
+      procedure: enableStorageBucketVersioning
+  graphql:
+    rootFieldName: enableStorageBucketVersioning
+    rootFieldKind: Mutation
+  description: enables bucket versioning support.
+
+---
+kind: CommandPermissions
+version: v1
+definition:
+  commandName: EnableStorageBucketVersioning
+  permissions:
+    - role: admin
+      allowExecution: true
+
diff --git a/tests/engine/app/metadata/PutStorageObjectLegalHold.hml b/tests/engine/app/metadata/PutStorageObjectLegalHold.hml
new file mode 100644
index 0000000..4ea0813
--- /dev/null
+++ b/tests/engine/app/metadata/PutStorageObjectLegalHold.hml
@@ -0,0 +1,35 @@
+---
+kind: Command
+version: v1
+definition:
+  name: PutStorageObjectLegalHold
+  outputType: Boolean!
+  arguments:
+    - name: bucket
+      type: String
+    - name: clientId
+      type: StorageClientId
+    - name: object
+      type: String!
+    - name: status
+      type: StorageLegalHoldStatus
+    - name: versionId
+      type: String
+  source:
+    dataConnectorName: storage
+    dataConnectorCommand:
+      procedure: putStorageObjectLegalHold
+  graphql:
+    rootFieldName: putStorageObjectLegalHold
+    rootFieldKind: Mutation
+  description: applies legal-hold onto an object.
+
+---
+kind: CommandPermissions
+version: v1
+definition:
+  commandName: PutStorageObjectLegalHold
+  permissions:
+    - role: admin
+      allowExecution: true
+
diff --git a/tests/engine/app/metadata/PutStorageObjectRetention.hml b/tests/engine/app/metadata/PutStorageObjectRetention.hml
new file mode 100644
index 0000000..61c8aad
--- /dev/null
+++ b/tests/engine/app/metadata/PutStorageObjectRetention.hml
@@ -0,0 +1,39 @@
+---
+kind: Command
+version: v1
+definition:
+  name: PutStorageObjectRetention
+  outputType: Boolean!
+  arguments:
+    - name: bucket
+      type: String
+    - name: clientId
+      type: StorageClientId
+    - name: governanceBypass
+      type: Boolean
+    - name: mode
+      type: StorageRetentionMode
+    - name: object
+      type: String!
+    - name: retainUntilDate
+      type: TimestampTz
+    - name: versionId
+      type: String
+  source:
+    dataConnectorName: storage
+    dataConnectorCommand:
+      procedure: putStorageObjectRetention
+  graphql:
+    rootFieldName: putStorageObjectRetention
+    rootFieldKind: Mutation
+  description: applies object retention lock onto an object.
+
+---
+kind: CommandPermissions
+version: v1
+definition:
+  commandName: PutStorageObjectRetention
+  permissions:
+    - role: admin
+      allowExecution: true
+
diff --git a/tests/engine/app/metadata/PutStorageObjectTags.hml b/tests/engine/app/metadata/PutStorageObjectTags.hml
new file mode 100644
index 0000000..d51c92b
--- /dev/null
+++ b/tests/engine/app/metadata/PutStorageObjectTags.hml
@@ -0,0 +1,36 @@
+---
+kind: Command
+version: v1
+definition:
+  name: PutStorageObjectTags
+  outputType: Boolean!
+  arguments:
+    - name: bucket
+      type: String
+    - name: clientId
+      type: StorageClientId
+    - name: object
+      type: String!
+    - name: tags
+      type: Json!
+    - name: versionId
+      type: String
+  source:
+    dataConnectorName: storage
+    dataConnectorCommand:
+      procedure: putStorageObjectTags
+  graphql:
+    rootFieldName: putStorageObjectTags
+    rootFieldKind: Mutation
+  description: sets new object tags on the given object, replacing/overwriting
+    any existing tags.
+
+---
+kind: CommandPermissions
+version: v1
+definition:
+  commandName: PutStorageObjectTags
+  permissions:
+    - role: admin
+      allowExecution: true
+
diff --git a/tests/engine/app/metadata/RemoveIncompleteStorageUpload.hml b/tests/engine/app/metadata/RemoveIncompleteStorageUpload.hml
new file mode 100644
index 0000000..5960bcc
--- /dev/null
+++ b/tests/engine/app/metadata/RemoveIncompleteStorageUpload.hml
@@ -0,0 +1,31 @@
+---
+kind: Command
+version: v1
+definition:
+  name: RemoveIncompleteStorageUpload
+  outputType: Boolean!
+  arguments:
+    - name: bucket
+      type: String
+    - name: clientId
+      type: StorageClientId
+    - name: object
+      type: String!
+  source:
+    dataConnectorName: storage
+    dataConnectorCommand:
+      procedure: removeIncompleteStorageUpload
+  graphql:
+    rootFieldName: removeIncompleteStorageUpload
+    rootFieldKind: Mutation
+  description: removes a partially uploaded object.
+
+---
+kind: CommandPermissions
+version: v1
+definition:
+  commandName: RemoveIncompleteStorageUpload
+  permissions:
+    - role: admin
+      allowExecution: true
+
diff --git a/tests/engine/app/metadata/RemoveStorageBucket.hml b/tests/engine/app/metadata/RemoveStorageBucket.hml
new file mode 100644
index 0000000..234dd87
--- /dev/null
+++ b/tests/engine/app/metadata/RemoveStorageBucket.hml
@@ -0,0 +1,29 @@
+---
+kind: Command
+version: v1
+definition:
+  name: RemoveStorageBucket
+  outputType: Boolean!
+  arguments:
+    - name: bucket
+      type: String
+    - name: clientId
+      type: StorageClientId
+  source:
+    dataConnectorName: storage
+    dataConnectorCommand:
+      procedure: removeStorageBucket
+  graphql:
+    rootFieldName: removeStorageBucket
+    rootFieldKind: Mutation
+  description: removes a bucket; the bucket should be empty to be successfully removed.
+ +--- +kind: CommandPermissions +version: v1 +definition: + commandName: RemoveStorageBucket + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/RemoveStorageBucketReplication.hml b/tests/engine/app/metadata/RemoveStorageBucketReplication.hml new file mode 100644 index 0000000..5caec98 --- /dev/null +++ b/tests/engine/app/metadata/RemoveStorageBucketReplication.hml @@ -0,0 +1,28 @@ +--- +kind: Command +version: v1 +definition: + name: RemoveStorageBucketReplication + outputType: Boolean! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: removeStorageBucketReplication + graphql: + rootFieldName: removeStorageBucketReplication + rootFieldKind: Mutation + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: RemoveStorageBucketReplication + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/RemoveStorageBucketTags.hml b/tests/engine/app/metadata/RemoveStorageBucketTags.hml new file mode 100644 index 0000000..b2cc891 --- /dev/null +++ b/tests/engine/app/metadata/RemoveStorageBucketTags.hml @@ -0,0 +1,29 @@ +--- +kind: Command +version: v1 +definition: + name: RemoveStorageBucketTags + outputType: Boolean! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: removeStorageBucketTags + graphql: + rootFieldName: removeStorageBucketTags + rootFieldKind: Mutation + description: removes all tags on a bucket. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: RemoveStorageBucketTags + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/RemoveStorageObject.hml b/tests/engine/app/metadata/RemoveStorageObject.hml new file mode 100644 index 0000000..c1b1a8b --- /dev/null +++ b/tests/engine/app/metadata/RemoveStorageObject.hml @@ -0,0 +1,37 @@ +--- +kind: Command +version: v1 +definition: + name: RemoveStorageObject + outputType: Boolean! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: forceDelete + type: Boolean + - name: governanceBypass + type: Boolean + - name: object + type: String! + - name: versionId + type: String + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: removeStorageObject + graphql: + rootFieldName: removeStorageObject + rootFieldKind: Mutation + description: removes an object, honoring the specified options (version ID, + governance bypass, force delete). + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: RemoveStorageObject + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/RemoveStorageObjectTags.hml b/tests/engine/app/metadata/RemoveStorageObjectTags.hml new file mode 100644 index 0000000..181d6b2 --- /dev/null +++ b/tests/engine/app/metadata/RemoveStorageObjectTags.hml @@ -0,0 +1,33 @@ +--- +kind: Command +version: v1 +definition: + name: RemoveStorageObjectTags + outputType: Boolean! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: object + type: String! + - name: versionId + type: String + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: removeStorageObjectTags + graphql: + rootFieldName: removeStorageObjectTags + rootFieldKind: Mutation + description: removes Object Tags from the given object.
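Similarly, a sketch of clearing the tags of one object version through the root field above; the bucket, object, and version values are placeholders:

```graphql
# Hypothetical call to the removeStorageObjectTags root field defined above.
# versionId is optional; when provided, only that version's tags are removed.
mutation {
  removeStorageObjectTags(
    bucket: "media-assets"
    object: "reports/2025-01.csv"
    versionId: "example-version-id"
  )
}
```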
+ +--- +kind: CommandPermissions +version: v1 +definition: + commandName: RemoveStorageObjectTags + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/RemoveStorageObjects.hml b/tests/engine/app/metadata/RemoveStorageObjects.hml new file mode 100644 index 0000000..5ac26bf --- /dev/null +++ b/tests/engine/app/metadata/RemoveStorageObjects.hml @@ -0,0 +1,78 @@ +--- +kind: ObjectType +version: v1 +definition: + name: RemoveStorageObjectError + description: the container of a Multi Delete S3 API error. + fields: + - name: error + type: Json + - name: objectName + type: String! + - name: versionId + type: String! + graphql: + typeName: RemoveStorageObjectError + inputTypeName: RemoveStorageObjectErrorInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: RemoveStorageObjectError + +--- +kind: TypePermissions +version: v1 +definition: + typeName: RemoveStorageObjectError + permissions: + - role: admin + output: + allowedFields: + - error + - objectName + - versionId + +--- +kind: Command +version: v1 +definition: + name: RemoveStorageObjects + outputType: "[RemoveStorageObjectError!]!" + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: governanceBypass + type: Boolean + - name: maxKeys + type: Int32! + - name: prefix + type: String! + - name: recursive + type: Boolean! + - name: startAfter + type: String! + - name: withMetadata + type: Boolean! + - name: withVersions + type: Boolean! + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: removeStorageObjects + graphql: + rootFieldName: removeStorageObjects + rootFieldKind: Mutation + description: removes a list of objects in a bucket. The call sends delete + requests to the server in batches of up to 1000 objects at a time. Any + errors observed are returned in the result list. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: RemoveStorageObjects + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/SetStorageBucketEncryption.hml b/tests/engine/app/metadata/SetStorageBucketEncryption.hml new file mode 100644 index 0000000..dc11b15 --- /dev/null +++ b/tests/engine/app/metadata/SetStorageBucketEncryption.hml @@ -0,0 +1,31 @@ +--- +kind: Command +version: v1 +definition: + name: SetStorageBucketEncryption + outputType: Boolean! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: rules + type: "[ServerSideEncryptionRule!]!" + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: setStorageBucketEncryption + graphql: + rootFieldName: setStorageBucketEncryption + rootFieldKind: Mutation + description: sets default encryption configuration on a bucket. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: SetStorageBucketEncryption + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/SetStorageBucketLifecycle.hml b/tests/engine/app/metadata/SetStorageBucketLifecycle.hml new file mode 100644 index 0000000..cfed1c1 --- /dev/null +++ b/tests/engine/app/metadata/SetStorageBucketLifecycle.hml @@ -0,0 +1,31 @@ +--- +kind: Command +version: v1 +definition: + name: SetStorageBucketLifecycle + outputType: Boolean! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: rules + type: "[BucketLifecycleRule!]!"
+ source: + dataConnectorName: storage + dataConnectorCommand: + procedure: setStorageBucketLifecycle + graphql: + rootFieldName: setStorageBucketLifecycle + rootFieldKind: Mutation + description: sets lifecycle configuration on a bucket or an object prefix. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: SetStorageBucketLifecycle + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/SetStorageBucketNotification.hml b/tests/engine/app/metadata/SetStorageBucketNotification.hml new file mode 100644 index 0000000..5184e04 --- /dev/null +++ b/tests/engine/app/metadata/SetStorageBucketNotification.hml @@ -0,0 +1,35 @@ +--- +kind: Command +version: v1 +definition: + name: SetStorageBucketNotification + outputType: Boolean! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: cloudFunctionConfigurations + type: "[NotificationLambdaConfig!]!" + - name: queueConfigurations + type: "[NotificationQueueConfig!]!" + - name: topicConfigurations + type: "[NotificationTopicConfig!]!" + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: setStorageBucketNotification + graphql: + rootFieldName: setStorageBucketNotification + rootFieldKind: Mutation + description: sets a new notification configuration on a bucket. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: SetStorageBucketNotification + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/SetStorageBucketReplication.hml b/tests/engine/app/metadata/SetStorageBucketReplication.hml new file mode 100644 index 0000000..4eab61f --- /dev/null +++ b/tests/engine/app/metadata/SetStorageBucketReplication.hml @@ -0,0 +1,35 @@ +--- +kind: Command +version: v1 +definition: + name: SetStorageBucketReplication + outputType: Boolean! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: role + type: String + - name: rules + type: "[StorageReplicationRule!]!" + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: setStorageBucketReplication + graphql: + rootFieldName: setStorageBucketReplication + rootFieldKind: Mutation + description: sets replication configuration on a bucket. Role can be obtained by
 first defining the replication target on MinIO to associate the source and + destination buckets for replication with the replication endpoint. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: SetStorageBucketReplication + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/SetStorageBucketTags.hml b/tests/engine/app/metadata/SetStorageBucketTags.hml new file mode 100644 index 0000000..bd55490 --- /dev/null +++ b/tests/engine/app/metadata/SetStorageBucketTags.hml @@ -0,0 +1,31 @@ +--- +kind: Command +version: v1 +definition: + name: SetStorageBucketTags + outputType: Boolean! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: tags + type: Json! + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: setStorageBucketTags + graphql: + rootFieldName: setStorageBucketTags + rootFieldKind: Mutation + description: sets tags on a bucket.
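A minimal sketch of the corresponding mutation, assuming the `Json` scalar accepts an inline object literal for `tags` (bucket name and tag values are hypothetical):

```graphql
# Hypothetical call to the setStorageBucketTags root field defined above.
# The tags argument replaces the bucket's entire tag set.
mutation {
  setStorageBucketTags(
    bucket: "media-assets"
    tags: { environment: "staging", team: "platform" }
  )
}
```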
+ +--- +kind: CommandPermissions +version: v1 +definition: + commandName: SetStorageBucketTags + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/SetStorageObjectLockConfig.hml b/tests/engine/app/metadata/SetStorageObjectLockConfig.hml new file mode 100644 index 0000000..e3aedf5 --- /dev/null +++ b/tests/engine/app/metadata/SetStorageObjectLockConfig.hml @@ -0,0 +1,34 @@ +--- +kind: Command +version: v1 +definition: + name: SetStorageObjectLockConfig + outputType: Boolean! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: mode + type: StorageRetentionMode + - name: unit + type: StorageRetentionValidityUnit + - name: validity + type: Int32 + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: setStorageObjectLockConfig + graphql: + rootFieldName: setStorageObjectLockConfig + rootFieldKind: Mutation + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: SetStorageObjectLockConfig + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageBucketEncryption.hml b/tests/engine/app/metadata/StorageBucketEncryption.hml new file mode 100644 index 0000000..9706a01 --- /dev/null +++ b/tests/engine/app/metadata/StorageBucketEncryption.hml @@ -0,0 +1,115 @@ +--- +kind: ObjectType +version: v1 +definition: + name: StorageApplySseByDefault + description: defines default encryption configuration, KMS or SSE. To activate + KMS, SSEAlgorithm needs to be set to `aws:kms`. MinIO currently does not + support KMS. + fields: + - name: kmsMasterKeyId + type: String + - name: sseAlgorithm + type: String! + graphql: + typeName: StorageApplySseByDefault + inputTypeName: StorageApplySseByDefaultInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageApplySSEByDefault + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageApplySseByDefault + permissions: + - role: admin + output: + allowedFields: + - kmsMasterKeyId + - sseAlgorithm + +--- +kind: ObjectType +version: v1 +definition: + name: ServerSideEncryptionRule + description: rule layer encapsulates default encryption configuration + fields: + - name: apply + type: StorageApplySseByDefault! + graphql: + typeName: ServerSideEncryptionRule + inputTypeName: ServerSideEncryptionRuleInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: ServerSideEncryptionRule + +--- +kind: TypePermissions +version: v1 +definition: + typeName: ServerSideEncryptionRule + permissions: + - role: admin + output: + allowedFields: + - apply + +--- +kind: ObjectType +version: v1 +definition: + name: ServerSideEncryptionConfiguration + description: is the default encryption configuration structure. + fields: + - name: rules + type: "[ServerSideEncryptionRule!]!"
+ graphql: + typeName: ServerSideEncryptionConfiguration + inputTypeName: ServerSideEncryptionConfigurationInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: ServerSideEncryptionConfiguration + +--- +kind: TypePermissions +version: v1 +definition: + typeName: ServerSideEncryptionConfiguration + permissions: + - role: admin + output: + allowedFields: + - rules + +--- +kind: Command +version: v1 +definition: + name: StorageBucketEncryption + outputType: ServerSideEncryptionConfiguration + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageBucketEncryption + graphql: + rootFieldName: storageBucketEncryption + rootFieldKind: Query + description: gets default encryption configuration set on a bucket. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageBucketEncryption + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageBucketExists.hml b/tests/engine/app/metadata/StorageBucketExists.hml new file mode 100644 index 0000000..e91378e --- /dev/null +++ b/tests/engine/app/metadata/StorageBucketExists.hml @@ -0,0 +1,29 @@ +--- +kind: Command +version: v1 +definition: + name: StorageBucketExists + outputType: Boolean! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageBucketExists + graphql: + rootFieldName: storageBucketExists + rootFieldKind: Query + description: checks if a bucket exists. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageBucketExists + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageBucketLifecycle.hml b/tests/engine/app/metadata/StorageBucketLifecycle.hml new file mode 100644 index 0000000..2bd6976 --- /dev/null +++ b/tests/engine/app/metadata/StorageBucketLifecycle.hml @@ -0,0 +1,440 @@ +--- +kind: ObjectType +version: v1 +definition: + name: AbortIncompleteMultipartUpload + description: structure, not supported yet on MinIO + fields: + - name: daysAfterInitiation + type: Int32 + graphql: + typeName: AbortIncompleteMultipartUpload + inputTypeName: AbortIncompleteMultipartUploadInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: AbortIncompleteMultipartUpload + +--- +kind: TypePermissions +version: v1 +definition: + typeName: AbortIncompleteMultipartUpload + permissions: + - role: admin + output: + allowedFields: + - daysAfterInitiation + +--- +kind: ObjectType +version: v1 +definition: + name: LifecycleAllVersionsExpiration + description: represents AllVersionsExpiration actions element in an ILM policy + fields: + - name: days + type: Int32 + - name: deleteMarker + type: Boolean + graphql: + typeName: LifecycleAllVersionsExpiration + inputTypeName: LifecycleAllVersionsExpirationInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: LifecycleAllVersionsExpiration + +--- +kind: TypePermissions +version: v1 +definition: + typeName: LifecycleAllVersionsExpiration + permissions: + - role: admin + output: + allowedFields: + - days + - deleteMarker + +--- +kind: ObjectType +version: v1 +definition: + name: LifecycleDelMarkerExpiration + description: represents DelMarkerExpiration actions element in an ILM policy + fields: + - name: days + type: Int32 + graphql: + typeName: 
LifecycleDelMarkerExpiration + inputTypeName: LifecycleDelMarkerExpirationInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: LifecycleDelMarkerExpiration + +--- +kind: TypePermissions +version: v1 +definition: + typeName: LifecycleDelMarkerExpiration + permissions: + - role: admin + output: + allowedFields: + - days + +--- +kind: ObjectType +version: v1 +definition: + name: LifecycleExpiration + description: expiration details of lifecycle configuration + fields: + - name: date + type: Date + - name: days + type: Int32 + - name: expiredObjectAllVersions + type: Boolean + - name: expiredObjectDeleteMarker + type: Boolean + graphql: + typeName: LifecycleExpiration + inputTypeName: LifecycleExpirationInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: LifecycleExpiration + +--- +kind: TypePermissions +version: v1 +definition: + typeName: LifecycleExpiration + permissions: + - role: admin + output: + allowedFields: + - date + - days + - expiredObjectAllVersions + - expiredObjectDeleteMarker + +--- +kind: ObjectType +version: v1 +definition: + name: StorageTag + description: a key/value pair representing an object tag used in configuration rules + fields: + - name: key + type: String + - name: value + type: String + graphql: + typeName: StorageTag + inputTypeName: StorageTagInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageTag + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageTag + permissions: + - role: admin + output: + allowedFields: + - key + - value + +--- +kind: ObjectType +version: v1 +definition: + name: LifecycleFilterAnd + description: the And Rule for LifecycleTag, to be used in LifecycleRuleFilter + fields: + - name: objectSizeGreaterThan + type: Int64 + - name: objectSizeLessThan + type: Int64 + - name: prefix + type: String + - name: tags + type: "[StorageTag!]" + graphql: + typeName: LifecycleFilterAnd + inputTypeName: LifecycleFilterAndInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: LifecycleFilterAnd + +--- +kind: TypePermissions +version: v1 +definition: + typeName: LifecycleFilterAnd + permissions: + - role: admin + output: + allowedFields: + - objectSizeGreaterThan + - objectSizeLessThan + - prefix + - tags + +--- +kind: ObjectType +version: v1 +definition: + name: LifecycleFilter + description: will be used in selecting rule(s) for lifecycle configuration + fields: + - name: and + type: LifecycleFilterAnd + - name: objectSizeGreaterThan + type: Int64 + - name: objectSizeLessThan + type: Int64 + - name: prefix + type: String + - name: tag + type: StorageTag + graphql: + typeName: LifecycleFilter + inputTypeName: LifecycleFilterInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: LifecycleFilter + +--- +kind: TypePermissions +version: v1 +definition: + typeName: LifecycleFilter + permissions: + - role: admin + output: + allowedFields: + - and + - objectSizeGreaterThan + - objectSizeLessThan + - prefix + - tag + +--- +kind: ObjectType +version: v1 +definition: + name: LifecycleNoncurrentVersionExpiration + description: "Specifies when noncurrent object versions expire. Upon + expiration, the server permanently deletes the noncurrent object versions. Set + this lifecycle configuration action on a bucket that has versioning enabled + (or suspended) to request that the server delete noncurrent object versions at + a specific period in the object's lifetime." + fields: + - name: newerNoncurrentVersions + type: Int32 + - name: noncurrentDays + type: Int32 + graphql: + typeName: LifecycleNoncurrentVersionExpiration + inputTypeName: LifecycleNoncurrentVersionExpirationInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: LifecycleNoncurrentVersionExpiration + +--- +kind: TypePermissions +version: v1 +definition: + typeName: LifecycleNoncurrentVersionExpiration + permissions: + - role: admin + output: + allowedFields: + - newerNoncurrentVersions + - noncurrentDays + +--- +kind: ObjectType +version: v1 +definition: + name: LifecycleNoncurrentVersionTransition + description: sets this action to request that the server transition noncurrent + object versions to a different storage class at a specific period in the + object's lifetime. + fields: + - name: newerNoncurrentVersions + type: Int32 + - name: noncurrentDays + type: Int32 + - name: storageClass + type: String + graphql: + typeName: LifecycleNoncurrentVersionTransition + inputTypeName: LifecycleNoncurrentVersionTransitionInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: LifecycleNoncurrentVersionTransition + +--- +kind: TypePermissions +version: v1 +definition: + typeName: LifecycleNoncurrentVersionTransition + permissions: + - role: admin + output: + allowedFields: + - newerNoncurrentVersions + - noncurrentDays + - storageClass + +--- +kind: ObjectType +version: v1 +definition: + name: LifecycleTransition + description: transition details of lifecycle configuration + fields: + - name: date + type: Date + - name: days + type: Int32 + - name: storageClass + type: String + graphql: + typeName: LifecycleTransition + inputTypeName: LifecycleTransitionInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: LifecycleTransition + +--- +kind: TypePermissions +version: v1 +definition: + typeName: LifecycleTransition + permissions: + - role: admin + output: + allowedFields: + - date + - days + - storageClass + +--- +kind: ObjectType +version: v1 +definition: + name: BucketLifecycleRule + description: represents a single rule in lifecycle configuration + fields: + - name: abortIncompleteMultipartUpload + type: AbortIncompleteMultipartUpload + - name: allVersionsExpiration + type: LifecycleAllVersionsExpiration + - name: delMarkerExpiration + type: LifecycleDelMarkerExpiration + - name: expiration + type: LifecycleExpiration + - name: filter + type: LifecycleFilter + - name: id + type: String!
+ - name: noncurrentVersionExpiration + type: LifecycleNoncurrentVersionExpiration + - name: noncurrentVersionTransition + type: LifecycleNoncurrentVersionTransition + - name: prefix + type: String + - name: status + type: String + - name: transition + type: LifecycleTransition + graphql: + typeName: BucketLifecycleRule + inputTypeName: BucketLifecycleRuleInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: BucketLifecycleRule + +--- +kind: TypePermissions +version: v1 +definition: + typeName: BucketLifecycleRule + permissions: + - role: admin + output: + allowedFields: + - abortIncompleteMultipartUpload + - allVersionsExpiration + - delMarkerExpiration + - expiration + - filter + - id + - noncurrentVersionExpiration + - noncurrentVersionTransition + - prefix + - status + - transition + +--- +kind: ObjectType +version: v1 +definition: + name: BucketLifecycleConfiguration + description: is a collection of lifecycle Rule objects. + fields: + - name: rules + type: "[BucketLifecycleRule!]!" + graphql: + typeName: BucketLifecycleConfiguration + inputTypeName: BucketLifecycleConfigurationInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: BucketLifecycleConfiguration + +--- +kind: TypePermissions +version: v1 +definition: + typeName: BucketLifecycleConfiguration + permissions: + - role: admin + output: + allowedFields: + - rules + +--- +kind: Command +version: v1 +definition: + name: StorageBucketLifecycle + outputType: BucketLifecycleConfiguration + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageBucketLifecycle + graphql: + rootFieldName: storageBucketLifecycle + rootFieldKind: Query + description: gets lifecycle on a bucket or a prefix. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageBucketLifecycle + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageBucketNotification.hml b/tests/engine/app/metadata/StorageBucketNotification.hml new file mode 100644 index 0000000..d240158 --- /dev/null +++ b/tests/engine/app/metadata/StorageBucketNotification.hml @@ -0,0 +1,266 @@ +--- +kind: ObjectType +version: v1 +definition: + name: NotificationFilterRule + description: child of S3Key, a tag in the notification xml which carries + suffix/prefix filters + fields: + - name: name + type: String! + - name: value + type: String! 
+ graphql: + typeName: NotificationFilterRule + inputTypeName: NotificationFilterRuleInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: NotificationFilterRule + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NotificationFilterRule + permissions: + - role: admin + output: + allowedFields: + - name + - value + +--- +kind: ObjectType +version: v1 +definition: + name: NotificationS3Key + description: child of Filter, a tag in the notification xml which carries + suffix/prefix filters + fields: + - name: filterRule + type: "[NotificationFilterRule!]" + graphql: + typeName: NotificationS3Key + inputTypeName: NotificationS3KeyInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: NotificationS3Key + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NotificationS3Key + permissions: + - role: admin + output: + allowedFields: + - filterRule + +--- +kind: ObjectType +version: v1 +definition: + name: NotificationFilter + description: a tag in the notification xml structure which carries + suffix/prefix filters + fields: + - name: s3Key + type: NotificationS3Key + graphql: + typeName: NotificationFilter + inputTypeName: NotificationFilterInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: NotificationFilter + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NotificationFilter + permissions: + - role: admin + output: + allowedFields: + - s3Key + +--- +kind: ObjectType +version: v1 +definition: + name: NotificationLambdaConfig + description: carries one single cloud function notification configuration + fields: + - name: arn + type: String + - name: cloudFunction + type: String! + - name: event + type: "[String!]!" + - name: filter + type: NotificationFilter + - name: id + type: String + graphql: + typeName: NotificationLambdaConfig + inputTypeName: NotificationLambdaConfigInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: NotificationLambdaConfig + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NotificationLambdaConfig + permissions: + - role: admin + output: + allowedFields: + - arn + - cloudFunction + - event + - filter + - id + +--- +kind: ObjectType +version: v1 +definition: + name: NotificationQueueConfig + description: carries one single queue notification configuration + fields: + - name: arn + type: String + - name: event + type: "[String!]!" + - name: filter + type: NotificationFilter + - name: id + type: String + - name: queue + type: String! + graphql: + typeName: NotificationQueueConfig + inputTypeName: NotificationQueueConfigInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: NotificationQueueConfig + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NotificationQueueConfig + permissions: + - role: admin + output: + allowedFields: + - arn + - event + - filter + - id + - queue + +--- +kind: ObjectType +version: v1 +definition: + name: NotificationTopicConfig + description: carries one single topic notification configuration + fields: + - name: arn + type: String + - name: event + type: "[String!]!" + - name: filter + type: NotificationFilter + - name: id + type: String + - name: topic + type: String!
+ graphql: + typeName: NotificationTopicConfig + inputTypeName: NotificationTopicConfigInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: NotificationTopicConfig + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NotificationTopicConfig + permissions: + - role: admin + output: + allowedFields: + - arn + - event + - filter + - id + - topic + +--- +kind: ObjectType +version: v1 +definition: + name: NotificationConfig + description: the struct that represents a notification configuration object. + fields: + - name: cloudFunctionConfigurations + type: "[NotificationLambdaConfig!]!" + - name: queueConfigurations + type: "[NotificationQueueConfig!]!" + - name: topicConfigurations + type: "[NotificationTopicConfig!]!" + graphql: + typeName: NotificationConfig + inputTypeName: NotificationConfigInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: NotificationConfig + +--- +kind: TypePermissions +version: v1 +definition: + typeName: NotificationConfig + permissions: + - role: admin + output: + allowedFields: + - cloudFunctionConfigurations + - queueConfigurations + - topicConfigurations + +--- +kind: Command +version: v1 +definition: + name: StorageBucketNotification + outputType: NotificationConfig + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageBucketNotification + graphql: + rootFieldName: storageBucketNotification + rootFieldKind: Query + description: gets notification configuration on a bucket. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageBucketNotification + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageBucketPolicy.hml b/tests/engine/app/metadata/StorageBucketPolicy.hml new file mode 100644 index 0000000..8ba5ca5 --- /dev/null +++ b/tests/engine/app/metadata/StorageBucketPolicy.hml @@ -0,0 +1,29 @@ +--- +kind: Command +version: v1 +definition: + name: StorageBucketPolicy + outputType: String! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageBucketPolicy + graphql: + rootFieldName: storageBucketPolicy + rootFieldKind: Query + description: gets access permissions on a bucket or a prefix. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageBucketPolicy + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageBucketReplication.hml b/tests/engine/app/metadata/StorageBucketReplication.hml new file mode 100644 index 0000000..dcad2b4 --- /dev/null +++ b/tests/engine/app/metadata/StorageBucketReplication.hml @@ -0,0 +1,339 @@ +--- +kind: ObjectType +version: v1 +definition: + name: DeleteMarkerReplication + description: whether delete markers are replicated - + https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html + fields: + - name: status + type: StorageReplicationRuleStatus!
+ graphql: + typeName: DeleteMarkerReplication + inputTypeName: DeleteMarkerReplicationInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: DeleteMarkerReplication + +--- +kind: TypePermissions +version: v1 +definition: + typeName: DeleteMarkerReplication + permissions: + - role: admin + output: + allowedFields: + - status + +--- +kind: ObjectType +version: v1 +definition: + name: DeleteReplication + description: whether versioned deletes are replicated. This is a MinIO-specific extension + fields: + - name: status + type: StorageReplicationRuleStatus! + graphql: + typeName: DeleteReplication + inputTypeName: DeleteReplicationInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: DeleteReplication + +--- +kind: TypePermissions +version: v1 +definition: + typeName: DeleteReplication + permissions: + - role: admin + output: + allowedFields: + - status + +--- +kind: ObjectType +version: v1 +definition: + name: StorageReplicationDestination + fields: + - name: bucket + type: String! + - name: storageClass + type: String + graphql: + typeName: StorageReplicationDestination + inputTypeName: StorageReplicationDestinationInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageReplicationDestination + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageReplicationDestination + permissions: + - role: admin + output: + allowedFields: + - bucket + - storageClass + +--- +kind: ObjectType +version: v1 +definition: + name: ExistingObjectReplication + description: whether existing object replication is enabled + fields: + - name: status + type: StorageReplicationRuleStatus! + graphql: + typeName: ExistingObjectReplication + inputTypeName: ExistingObjectReplicationInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: ExistingObjectReplication + +--- +kind: TypePermissions +version: v1 +definition: + typeName: ExistingObjectReplication + permissions: + - role: admin + output: + allowedFields: + - status + +--- +kind: ObjectType +version: v1 +definition: + name: StorageReplicationFilterAnd + description: a tag to combine a prefix and multiple tags for a replication + configuration rule. + fields: + - name: rrefix + type: String + - name: tag + type: "[StorageTag!]" + graphql: + typeName: StorageReplicationFilterAnd + inputTypeName: StorageReplicationFilterAndInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageReplicationFilterAnd + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageReplicationFilterAnd + permissions: + - role: admin + output: + allowedFields: + - rrefix + - tag + +--- +kind: ObjectType +version: v1 +definition: + name: StorageReplicationFilter + description: a filter for a replication configuration Rule.
+ fields: + - name: and + type: StorageReplicationFilterAnd + - name: rrefix + type: String + - name: tag + type: StorageTag + graphql: + typeName: StorageReplicationFilter + inputTypeName: StorageReplicationFilterInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageReplicationFilter + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageReplicationFilter + permissions: + - role: admin + output: + allowedFields: + - and + - rrefix + - tag + +--- +kind: ObjectType +version: v1 +definition: + name: ReplicaModifications + description: specifies if replica modification sync is enabled + fields: + - name: status + type: StorageReplicationRuleStatus! + graphql: + typeName: ReplicaModifications + inputTypeName: ReplicaModificationsInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: ReplicaModifications + +--- +kind: TypePermissions +version: v1 +definition: + typeName: ReplicaModifications + permissions: + - role: admin + output: + allowedFields: + - status + +--- +kind: ObjectType +version: v1 +definition: + name: SourceSelectionCriteria + description: specifies additional source selection criteria in ReplicationConfiguration. + fields: + - name: replicaModifications + type: ReplicaModifications + graphql: + typeName: SourceSelectionCriteria + inputTypeName: SourceSelectionCriteriaInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: SourceSelectionCriteria + +--- +kind: TypePermissions +version: v1 +definition: + typeName: SourceSelectionCriteria + permissions: + - role: admin + output: + allowedFields: + - replicaModifications + +--- +kind: ObjectType +version: v1 +definition: + name: StorageReplicationRule + description: a rule for replication configuration. + fields: + - name: deleteMarkerReplication + type: DeleteMarkerReplication + - name: deleteReplication + type: DeleteReplication + - name: destination + type: StorageReplicationDestination + - name: existingObjectReplication + type: ExistingObjectReplication + - name: filter + type: StorageReplicationFilter! + - name: id + type: String + - name: priority + type: Int32! + - name: sourceSelectionCriteria + type: SourceSelectionCriteria + - name: status + type: StorageReplicationRuleStatus! + graphql: + typeName: StorageReplicationRule + inputTypeName: StorageReplicationRuleInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageReplicationRule + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageReplicationRule + permissions: + - role: admin + output: + allowedFields: + - deleteMarkerReplication + - deleteReplication + - destination + - existingObjectReplication + - filter + - id + - priority + - sourceSelectionCriteria + - status + +--- +kind: ObjectType +version: v1 +definition: + name: StorageReplicationConfig + description: replication configuration specified in + https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html + fields: + - name: role + type: String + - name: rules + type: "[StorageReplicationRule!]!" 
+ graphql: + typeName: StorageReplicationConfig + inputTypeName: StorageReplicationConfigInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageReplicationConfig + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageReplicationConfig + permissions: + - role: admin + output: + allowedFields: + - role + - rules + +--- +kind: Command +version: v1 +definition: + name: StorageBucketReplication + outputType: StorageReplicationConfig + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageBucketReplication + graphql: + rootFieldName: storageBucketReplication + rootFieldKind: Query + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageBucketReplication + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageBucketTags.hml b/tests/engine/app/metadata/StorageBucketTags.hml new file mode 100644 index 0000000..fc20741 --- /dev/null +++ b/tests/engine/app/metadata/StorageBucketTags.hml @@ -0,0 +1,29 @@ +--- +kind: Command +version: v1 +definition: + name: StorageBucketTags + outputType: Json! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageBucketTags + graphql: + rootFieldName: storageBucketTags + rootFieldKind: Query + description: gets tags of a bucket. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageBucketTags + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageBucketVersioning.hml b/tests/engine/app/metadata/StorageBucketVersioning.hml new file mode 100644 index 0000000..df60701 --- /dev/null +++ b/tests/engine/app/metadata/StorageBucketVersioning.hml @@ -0,0 +1,65 @@ +--- +kind: ObjectType +version: v1 +definition: + name: StorageBucketVersioningConfiguration + description: is the versioning configuration structure + fields: + - name: excludeFolders + type: Boolean + - name: excludedPrefixes + type: "[String!]" + - name: mfaDelete + type: String + - name: status + type: String + graphql: + typeName: StorageBucketVersioningConfiguration + inputTypeName: StorageBucketVersioningConfigurationInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageBucketVersioningConfiguration + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageBucketVersioningConfiguration + permissions: + - role: admin + output: + allowedFields: + - excludeFolders + - excludedPrefixes + - mfaDelete + - status + +--- +kind: Command +version: v1 +definition: + name: StorageBucketVersioning + outputType: StorageBucketVersioningConfiguration + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageBucketVersioning + graphql: + rootFieldName: storageBucketVersioning + rootFieldKind: Query + description: gets versioning configuration set on a bucket. 
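For reference, the query root field above returns the StorageBucketVersioningConfiguration fields defined earlier in this file; the bucket name is hypothetical:

```graphql
# Hypothetical call to the storageBucketVersioning root field defined above.
query {
  storageBucketVersioning(bucket: "media-assets") {
    status
    mfaDelete
    excludedPrefixes
    excludeFolders
  }
}
```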
+ +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageBucketVersioning + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageBuckets.hml b/tests/engine/app/metadata/StorageBuckets.hml new file mode 100644 index 0000000..ede9f79 --- /dev/null +++ b/tests/engine/app/metadata/StorageBuckets.hml @@ -0,0 +1,57 @@ +--- +kind: ObjectType +version: v1 +definition: + name: StorageBucketInfo + description: container for bucket metadata. + fields: + - name: creationDate + type: TimestampTz! + - name: name + type: String! + graphql: + typeName: StorageBucketInfo + inputTypeName: StorageBucketInfoInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageBucketInfo + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageBucketInfo + permissions: + - role: admin + output: + allowedFields: + - creationDate + - name + +--- +kind: Command +version: v1 +definition: + name: StorageBuckets + outputType: "[StorageBucketInfo!]!" + arguments: + - name: clientId + type: StorageClientId + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageBuckets + graphql: + rootFieldName: storageBuckets + rootFieldKind: Query + description: list all buckets. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageBuckets + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageIncompleteUploads.hml b/tests/engine/app/metadata/StorageIncompleteUploads.hml new file mode 100644 index 0000000..95ecf43 --- /dev/null +++ b/tests/engine/app/metadata/StorageIncompleteUploads.hml @@ -0,0 +1,72 @@ +--- +kind: ObjectType +version: v1 +definition: + name: StorageObjectMultipartInfo + description: container for multipart object metadata. + fields: + - name: initiated + type: TimestampTz + - name: key + type: String + - name: size + type: Int64 + - name: storageClass + type: String + - name: uploadId + type: String + graphql: + typeName: StorageObjectMultipartInfo + inputTypeName: StorageObjectMultipartInfoInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageObjectMultipartInfo + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageObjectMultipartInfo + permissions: + - role: admin + output: + allowedFields: + - initiated + - key + - size + - storageClass + - uploadId + +--- +kind: Command +version: v1 +definition: + name: StorageIncompleteUploads + outputType: "[StorageObjectMultipartInfo!]!" + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: prefix + type: String! + - name: recursive + type: Boolean + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageIncompleteUploads + graphql: + rootFieldName: storageIncompleteUploads + rootFieldKind: Query + description: list partially uploaded objects in a bucket. 
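A sketch of listing incomplete uploads through the root field above. `prefix` is non-nullable in this schema, so an empty string is passed on the assumption that it matches every key; the bucket name is hypothetical:

```graphql
# Hypothetical call to the storageIncompleteUploads root field defined above.
# Returns multipart uploads that were started but never completed or aborted.
query {
  storageIncompleteUploads(bucket: "media-assets", prefix: "", recursive: true) {
    key
    uploadId
    initiated
    size
  }
}
```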
+ +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageIncompleteUploads + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageObject.hml b/tests/engine/app/metadata/StorageObject.hml new file mode 100644 index 0000000..806106a --- /dev/null +++ b/tests/engine/app/metadata/StorageObject.hml @@ -0,0 +1,41 @@ +--- +kind: Command +version: v1 +definition: + name: StorageObject + outputType: StorageObject + arguments: + - name: bucket + type: String + - name: checksum + type: Boolean + - name: clientId + type: StorageClientId + - name: headers + type: Json + - name: object + type: String! + - name: partNumber + type: Int32 + - name: requestParams + type: Json + - name: versionId + type: String + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageObject + graphql: + rootFieldName: storageObject + rootFieldKind: Query + description: fetches metadata of an object. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageObject + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageObjectAttributes.hml b/tests/engine/app/metadata/StorageObjectAttributes.hml new file mode 100644 index 0000000..b92f78b --- /dev/null +++ b/tests/engine/app/metadata/StorageObjectAttributes.hml @@ -0,0 +1,207 @@ +--- +kind: ObjectType +version: v1 +definition: + name: StorageObjectChecksum + description: represents checksum values of the object. + fields: + - name: checksumCrc32 + type: String + - name: checksumCrc32C + type: String + - name: checksumCrc64Nvme + type: String + - name: checksumSha1 + type: String + - name: checksumSha256 + type: String + graphql: + typeName: StorageObjectChecksum + inputTypeName: StorageObjectChecksumInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageObjectChecksum + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageObjectChecksum + permissions: + - role: admin + output: + allowedFields: + - checksumCrc32 + - checksumCrc32C + - checksumCrc64Nvme + - checksumSha1 + - checksumSha256 + +--- +kind: ObjectType +version: v1 +definition: + name: StorageObjectAttributePart + fields: + - name: checksumCrc32 + type: String + - name: checksumCrc32C + type: String + - name: checksumCrc64Nvme + type: String + - name: checksumSha1 + type: String + - name: checksumSha256 + type: String + - name: partNumber + type: Int32! + - name: size + type: Int32! + graphql: + typeName: StorageObjectAttributePart + inputTypeName: StorageObjectAttributePartInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageObjectAttributePart + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageObjectAttributePart + permissions: + - role: admin + output: + allowedFields: + - checksumCrc32 + - checksumCrc32C + - checksumCrc64Nvme + - checksumSha1 + - checksumSha256 + - partNumber + - size + +--- +kind: ObjectType +version: v1 +definition: + name: StorageObjectParts + fields: + - name: isTruncated + type: Boolean! + - name: maxParts + type: Int32! + - name: nextPartNumberMarker + type: Int32! + - name: partNumberMarker + type: Int32! + - name: parts + type: "[StorageObjectAttributePart]!" + - name: partsCount + type: Int32! 
+ graphql: + typeName: StorageObjectParts + inputTypeName: StorageObjectPartsInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageObjectParts + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageObjectParts + permissions: + - role: admin + output: + allowedFields: + - isTruncated + - maxParts + - nextPartNumberMarker + - partNumberMarker + - parts + - partsCount + +--- +kind: ObjectType +version: v1 +definition: + name: StorageObjectAttributes + description: is the response object returned by the GetObjectAttributes API. + fields: + - name: checksum + type: StorageObjectChecksum! + - name: etag + type: String + - name: lastModified + type: TimestampTz! + - name: objectParts + type: StorageObjectParts! + - name: objectSize + type: Int32! + - name: storageClass + type: String! + - name: versionId + type: String + graphql: + typeName: StorageObjectAttributes + inputTypeName: StorageObjectAttributesInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageObjectAttributes + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageObjectAttributes + permissions: + - role: admin + output: + allowedFields: + - checksum + - etag + - lastModified + - objectParts + - objectSize + - storageClass + - versionId + +--- +kind: Command +version: v1 +definition: + name: StorageObjectAttributes + outputType: StorageObjectAttributes + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: maxParts + type: Int32 + - name: object + type: String! + - name: partNumberMarker + type: Int32 + - name: versionId + type: String + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageObjectAttributes + graphql: + rootFieldName: storageObjectAttributes + rootFieldKind: Query + description: returns attributes of the object, such as its checksum, parts, + size, and storage class, as reported by the GetObjectAttributes API. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageObjectAttributes + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageObjectLegalHold.hml b/tests/engine/app/metadata/StorageObjectLegalHold.hml new file mode 100644 index 0000000..4df30ca --- /dev/null +++ b/tests/engine/app/metadata/StorageObjectLegalHold.hml @@ -0,0 +1,33 @@ +--- +kind: Command +version: v1 +definition: + name: StorageObjectLegalHold + outputType: StorageLegalHoldStatus! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: object + type: String! + - name: versionId + type: String + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageObjectLegalHold + graphql: + rootFieldName: storageObjectLegalHold + rootFieldKind: Query + description: returns legal-hold status on a given object. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageObjectLegalHold + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageObjectLockConfig.hml b/tests/engine/app/metadata/StorageObjectLockConfig.hml new file mode 100644 index 0000000..3cc53cb --- /dev/null +++ b/tests/engine/app/metadata/StorageObjectLockConfig.hml @@ -0,0 +1,64 @@ +--- +kind: ObjectType +version: v1 +definition: + name: StorageObjectLockConfig + fields: + - name: mode + type: StorageRetentionMode + - name: objectLock + type: String!
+ - name: unit + type: StorageRetentionValidityUnit + - name: validity + type: Int32 + graphql: + typeName: StorageObjectLockConfig + inputTypeName: StorageObjectLockConfigInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageObjectLockConfig + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageObjectLockConfig + permissions: + - role: admin + output: + allowedFields: + - mode + - objectLock + - unit + - validity + +--- +kind: Command +version: v1 +definition: + name: StorageObjectLockConfig + outputType: StorageObjectLockConfig + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageObjectLockConfig + graphql: + rootFieldName: storageObjectLockConfig + rootFieldKind: Query + description: gets the object lock configuration of a given bucket. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageObjectLockConfig + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageObjectTags.hml b/tests/engine/app/metadata/StorageObjectTags.hml new file mode 100644 index 0000000..6ecb440 --- /dev/null +++ b/tests/engine/app/metadata/StorageObjectTags.hml @@ -0,0 +1,33 @@ +--- +kind: Command +version: v1 +definition: + name: StorageObjectTags + outputType: Json! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: object + type: String! + - name: versionId + type: String + source: + dataConnectorName: storage + dataConnectorCommand: + function: storageObjectTags + graphql: + rootFieldName: storageObjectTags + rootFieldKind: Query + description: fetches Object Tags from the given object. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StorageObjectTags + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StorageObjects.hml b/tests/engine/app/metadata/StorageObjects.hml new file mode 100644 index 0000000..309deb1 --- /dev/null +++ b/tests/engine/app/metadata/StorageObjects.hml @@ -0,0 +1,394 @@ +--- +kind: ObjectType +version: v1 +definition: + name: StorageGrantee + description: represents the person being granted permissions. + fields: + - name: displayName + type: String + - name: id + type: String + - name: uri + type: String + graphql: + typeName: StorageGrantee + inputTypeName: StorageGranteeInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageGrantee + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageGrantee + permissions: + - role: admin + output: + allowedFields: + - displayName + - id + - uri + +--- +kind: ObjectType +version: v1 +definition: + name: StorageGrant + description: holds grant information. + fields: + - name: grantee + type: StorageGrantee + - name: permission + type: String + graphql: + typeName: StorageGrant + inputTypeName: StorageGrantInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageGrant + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageGrant + permissions: + - role: admin + output: + allowedFields: + - grantee + - permission + +--- +kind: ObjectType +version: v1 +definition: + name: StorageOwner + description: contains the owner name and ID.
+ fields: + - name: id + type: String + - name: name + type: String + graphql: + typeName: StorageOwner + inputTypeName: StorageOwnerInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageOwner + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageOwner + permissions: + - role: admin + output: + allowedFields: + - id + - name + +--- +kind: ObjectType +version: v1 +definition: + name: StorageRestoreInfo + description: contains information of the restore operation of an archived object. + fields: + - name: expiryTime + type: TimestampTz + - name: ongoingRestore + type: Boolean! + graphql: + typeName: StorageRestoreInfo + inputTypeName: StorageRestoreInfoInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageRestoreInfo + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageRestoreInfo + permissions: + - role: admin + output: + allowedFields: + - expiryTime + - ongoingRestore + +--- +kind: ObjectType +version: v1 +definition: + name: StorageObject + fields: + - name: bucket + type: BucketName! + - name: checksumCrc32 + type: String + - name: checksumCrc32C + type: String + - name: checksumCrc64Nvme + type: String + - name: checksumSha1 + type: String + - name: checksumSha256 + type: String + - name: clientId + type: StorageClientId! + - name: contentType + type: String! + - name: etag + type: String! + - name: expiration + type: TimestampTz + - name: expirationRuleId + type: String + - name: expires + type: TimestampTz! + - name: grant + type: "[StorageGrant!]" + - name: isDeleteMarker + type: Boolean + - name: isLatest + type: Boolean + - name: lastModified + type: FilterTimestamp! + - name: metadata + type: Json + - name: name + type: ObjectPath! + - name: owner + type: StorageOwner + - name: replicationReady + type: Boolean + - name: replicationStatus + type: String + - name: restore + type: StorageRestoreInfo + - name: size + type: Int64! 
+ - name: storageClass + type: String + - name: userMetadata + type: Json + - name: userTagCount + type: Int32 + - name: userTags + type: Json + - name: versionId + type: String + graphql: + typeName: StorageObject + inputTypeName: StorageObjectInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: StorageObject + +--- +kind: TypePermissions +version: v1 +definition: + typeName: StorageObject + permissions: + - role: admin + output: + allowedFields: + - bucket + - checksumCrc32 + - checksumCrc32C + - checksumCrc64Nvme + - checksumSha1 + - checksumSha256 + - clientId + - contentType + - etag + - expiration + - expirationRuleId + - expires + - grant + - isDeleteMarker + - isLatest + - lastModified + - metadata + - name + - owner + - replicationReady + - replicationStatus + - restore + - size + - storageClass + - userMetadata + - userTagCount + - userTags + - versionId + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: StorageObjectBoolExp + operand: + object: + type: StorageObject + comparableFields: + - fieldName: bucket + booleanExpressionType: BucketNameBoolExp + - fieldName: checksumCrc32 + booleanExpressionType: StringBoolExp + - fieldName: checksumCrc32C + booleanExpressionType: StringBoolExp + - fieldName: checksumCrc64Nvme + booleanExpressionType: StringBoolExp + - fieldName: checksumSha1 + booleanExpressionType: StringBoolExp + - fieldName: checksumSha256 + booleanExpressionType: StringBoolExp + - fieldName: clientId + booleanExpressionType: StorageClientIdBoolExp + - fieldName: contentType + booleanExpressionType: StringBoolExp + - fieldName: etag + booleanExpressionType: StringBoolExp + - fieldName: expiration + booleanExpressionType: TimestampTzBoolExp + - fieldName: expirationRuleId + booleanExpressionType: StringBoolExp + - fieldName: expires + booleanExpressionType: TimestampTzBoolExp + - fieldName: isDeleteMarker + booleanExpressionType: BooleanBoolExp + - fieldName: isLatest + booleanExpressionType: BooleanBoolExp + - fieldName: lastModified + booleanExpressionType: FilterTimestampBoolExp + - fieldName: metadata + booleanExpressionType: JsonBoolExp + - fieldName: name + booleanExpressionType: ObjectPathBoolExp + - fieldName: replicationReady + booleanExpressionType: BooleanBoolExp + - fieldName: replicationStatus + booleanExpressionType: StringBoolExp + - fieldName: size + booleanExpressionType: Int64BoolExp + - fieldName: storageClass + booleanExpressionType: StringBoolExp + - fieldName: userMetadata + booleanExpressionType: JsonBoolExp + - fieldName: userTagCount + booleanExpressionType: Int32BoolExp + - fieldName: userTags + booleanExpressionType: JsonBoolExp + - fieldName: versionId + booleanExpressionType: StringBoolExp + comparableRelationships: [] + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: StorageObjectBoolExp + +--- +kind: OrderByExpression +version: v1 +definition: + name: StorageObjectOrderByExp + operand: + object: + orderedType: StorageObject + orderableFields: + - fieldName: bucket + orderByExpression: BucketNameOrderByExp + - fieldName: checksumCrc32 + orderByExpression: StringOrderByExp + - fieldName: checksumCrc32C + orderByExpression: StringOrderByExp + - fieldName: checksumCrc64Nvme + orderByExpression: StringOrderByExp + - fieldName: checksumSha1 + orderByExpression: StringOrderByExp + - fieldName: checksumSha256 + orderByExpression: StringOrderByExp + - fieldName: clientId + orderByExpression: StorageClientIdOrderByExp + - fieldName: contentType + 
orderByExpression: StringOrderByExp + - fieldName: etag + orderByExpression: StringOrderByExp + - fieldName: expiration + orderByExpression: TimestampTzOrderByExp + - fieldName: expirationRuleId + orderByExpression: StringOrderByExp + - fieldName: expires + orderByExpression: TimestampTzOrderByExp + - fieldName: isDeleteMarker + orderByExpression: BooleanOrderByExp + - fieldName: isLatest + orderByExpression: BooleanOrderByExp + - fieldName: lastModified + orderByExpression: FilterTimestampOrderByExp + - fieldName: metadata + orderByExpression: JsonOrderByExp + - fieldName: name + orderByExpression: ObjectPathOrderByExp + - fieldName: replicationReady + orderByExpression: BooleanOrderByExp + - fieldName: replicationStatus + orderByExpression: StringOrderByExp + - fieldName: size + orderByExpression: Int64OrderByExp + - fieldName: storageClass + orderByExpression: StringOrderByExp + - fieldName: userMetadata + orderByExpression: JsonOrderByExp + - fieldName: userTagCount + orderByExpression: Int32OrderByExp + - fieldName: userTags + orderByExpression: JsonOrderByExp + - fieldName: versionId + orderByExpression: StringOrderByExp + orderableRelationships: [] + graphql: + expressionTypeName: StorageObjectOrderByExp + +--- +kind: Model +version: v2 +definition: + name: StorageObjects + objectType: StorageObject + arguments: + - name: recursive + type: Boolean + source: + dataConnectorName: storage + collection: storageObjects + filterExpressionType: StorageObjectBoolExp + orderByExpression: StorageObjectOrderByExp + graphql: + selectMany: + queryRootField: storageObjects + subscription: + rootField: storageObjects + selectUniques: [] + argumentsInputType: StorageObjectsArguments + description: The information of a storage object + +--- +kind: ModelPermissions +version: v1 +definition: + modelName: StorageObjects + permissions: + - role: admin + select: + filter: null + allowSubscriptions: true + diff --git a/tests/engine/app/metadata/StoragePresignedDownloadUrl.hml b/tests/engine/app/metadata/StoragePresignedDownloadUrl.hml new file mode 100644 index 0000000..1366db6 --- /dev/null +++ b/tests/engine/app/metadata/StoragePresignedDownloadUrl.hml @@ -0,0 +1,69 @@ +--- +kind: ObjectType +version: v1 +definition: + name: PresignedUrlResponse + description: holds the presigned URL and expiry information. + fields: + - name: expiredAt + type: String! + - name: url + type: String! + graphql: + typeName: PresignedUrlResponse + inputTypeName: PresignedUrlResponseInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: PresignedURLResponse + +--- +kind: TypePermissions +version: v1 +definition: + typeName: PresignedUrlResponse + permissions: + - role: admin + output: + allowedFields: + - expiredAt + - url + +--- +kind: Command +version: v1 +definition: + name: StoragePresignedDownloadUrl + outputType: PresignedUrlResponse! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: expiry + type: Duration + - name: object + type: String! + - name: requestParams + type: Json + source: + dataConnectorName: storage + dataConnectorCommand: + function: storagePresignedDownloadUrl + graphql: + rootFieldName: storagePresignedDownloadUrl + rootFieldKind: Query + description: generates a presigned URL for HTTP GET operations. Browsers/Mobile + clients may point to this URL to directly download objects even if the + bucket is private.
This presigned URL can have an associated expiration time + in seconds after which it is no longer operational. The maximum expiry is + 604800 seconds (i.e. 7 days) and minimum is 1 second. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StoragePresignedDownloadUrl + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StoragePresignedHeadUrl.hml b/tests/engine/app/metadata/StoragePresignedHeadUrl.hml new file mode 100644 index 0000000..64d91ca --- /dev/null +++ b/tests/engine/app/metadata/StoragePresignedHeadUrl.hml @@ -0,0 +1,39 @@ +--- +kind: Command +version: v1 +definition: + name: StoragePresignedHeadUrl + outputType: PresignedUrlResponse! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: expiry + type: Duration + - name: object + type: String! + - name: requestParams + type: Json + source: + dataConnectorName: storage + dataConnectorCommand: + function: storagePresignedHeadUrl + graphql: + rootFieldName: storagePresignedHeadUrl + rootFieldKind: Query + description: generates a presigned URL for HTTP HEAD operations. Browsers/Mobile + clients may point to this URL to directly get metadata from objects even if + the bucket is private. This presigned URL can have an associated expiration + time in seconds after which it is no longer operational. The default expiry + is set to 7 days. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StoragePresignedHeadUrl + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/StoragePresignedUploadUrl.hml b/tests/engine/app/metadata/StoragePresignedUploadUrl.hml new file mode 100644 index 0000000..297dc07 --- /dev/null +++ b/tests/engine/app/metadata/StoragePresignedUploadUrl.hml @@ -0,0 +1,37 @@ +--- +kind: Command +version: v1 +definition: + name: StoragePresignedUploadUrl + outputType: PresignedUrlResponse! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: expiry + type: Duration + - name: object + type: String! + source: + dataConnectorName: storage + dataConnectorCommand: + function: storagePresignedUploadUrl + graphql: + rootFieldName: storagePresignedUploadUrl + rootFieldKind: Query + description: generates a presigned URL for HTTP PUT operations. Browsers/Mobile + clients may point to this URL to upload objects directly to a bucket even if + it is private. This presigned URL can have an associated expiration time in + seconds after which it is no longer operational. The default expiry is set + to 7 days. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: StoragePresignedUploadUrl + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/SuspendStorageBucketVersioning.hml b/tests/engine/app/metadata/SuspendStorageBucketVersioning.hml new file mode 100644 index 0000000..459ef51 --- /dev/null +++ b/tests/engine/app/metadata/SuspendStorageBucketVersioning.hml @@ -0,0 +1,29 @@ +--- +kind: Command +version: v1 +definition: + name: SuspendStorageBucketVersioning + outputType: Boolean! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: suspendStorageBucketVersioning + graphql: + rootFieldName: suspendStorageBucketVersioning + rootFieldKind: Mutation + description: disables bucket versioning support. 
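As a rough illustration of how the StorageObjects model and its boolean- and order-by-expression types above surface in GraphQL, a client query might look like the following sketch. It is not part of the patch: the prefix, timestamp, and limit are made-up values, and the exact argument shapes (`args`, `where`, `order_by`, `limit`) follow Hasura DDN conventions and depend on the schema the engine generates from this metadata.

```graphql
# Illustrative only: lists objects under a hypothetical "logs/" prefix,
# newest first, using the _starts_with and _gt operators declared in
# ObjectPathBoolExp and FilterTimestampBoolExp.
query RecentLogObjects {
  storageObjects(
    args: { recursive: true }
    where: {
      name: { _starts_with: "logs/" }
      lastModified: { _gt: "2025-01-01T00:00:00Z" }
    }
    order_by: [{ lastModified: Desc }]
    limit: 10
  ) {
    name
    bucket
    size
    etag
    lastModified
  }
}
```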
+ +--- +kind: CommandPermissions +version: v1 +definition: + commandName: SuspendStorageBucketVersioning + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/UploadStorageObject.hml b/tests/engine/app/metadata/UploadStorageObject.hml new file mode 100644 index 0000000..db4a1cd --- /dev/null +++ b/tests/engine/app/metadata/UploadStorageObject.hml @@ -0,0 +1,125 @@ +--- +kind: ObjectType +version: v1 +definition: + name: PutStorageObjectOptions + description: represents options specified by the user for the PutObject call. + fields: + - name: autoChecksum + type: ChecksumType + - name: cacheControl + type: String + - name: checksum + type: ChecksumType + - name: concurrentStreamParts + type: Boolean + - name: contentDisposition + type: String + - name: contentEncoding + type: String + - name: contentLanguage + type: String + - name: contentType + type: String + - name: disableContentSha256 + type: Boolean + - name: disableMultipart + type: Boolean + - name: expires + type: TimestampTz + - name: legalHold + type: StorageLegalHoldStatus + - name: mode + type: StorageRetentionMode + - name: numThreads + type: Int32 + - name: partSize + type: Int64 + - name: retainUntilDate + type: TimestampTz + - name: sendContentMd5 + type: Boolean + - name: storageClass + type: String + - name: userMetadata + type: Json + - name: userTags + type: Json + - name: websiteRedirectLocation + type: String + graphql: + typeName: PutStorageObjectOptions + inputTypeName: PutStorageObjectOptionsInput + dataConnectorTypeMapping: + - dataConnectorName: storage + dataConnectorObjectType: PutStorageObjectOptions + +--- +kind: TypePermissions +version: v1 +definition: + typeName: PutStorageObjectOptions + permissions: + - role: admin + output: + allowedFields: + - autoChecksum + - cacheControl + - checksum + - concurrentStreamParts + - contentDisposition + - contentEncoding + - contentLanguage + - contentType + - disableContentSha256 + - disableMultipart + - expires + - legalHold + - mode + - numThreads + - partSize + - retainUntilDate + - sendContentMd5 + - storageClass + - userMetadata + - userTags + - websiteRedirectLocation + +--- +kind: Command +version: v1 +definition: + name: UploadStorageObject + outputType: StorageUploadInfo! + arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: data + type: Bytes! + - name: object + type: String! + - name: options + type: PutStorageObjectOptions + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: uploadStorageObject + graphql: + rootFieldName: uploadStorageObject + rootFieldKind: Mutation + description: uploads objects that are less than 128MiB in a single PUT operation. + For objects that are greater than 128MiB in size, PutObject seamlessly + uploads the object as parts of 128MiB or more depending on the actual file + size. The max upload size for an object is 5TB. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: UploadStorageObject + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/UploadStorageObjectText.hml b/tests/engine/app/metadata/UploadStorageObjectText.hml new file mode 100644 index 0000000..5c461ce --- /dev/null +++ b/tests/engine/app/metadata/UploadStorageObjectText.hml @@ -0,0 +1,36 @@ +--- +kind: Command +version: v1 +definition: + name: UploadStorageObjectText + outputType: StorageUploadInfo!
+ arguments: + - name: bucket + type: String + - name: clientId + type: StorageClientId + - name: data + type: String! + - name: object + type: String! + - name: options + type: PutStorageObjectOptions + source: + dataConnectorName: storage + dataConnectorCommand: + procedure: uploadStorageObjectText + graphql: + rootFieldName: uploadStorageObjectText + rootFieldKind: Mutation + description: uploads an object in plain text to the storage server. The file + content is not base64-encoded, so the input is roughly 25% smaller than the + equivalent base64-encoded payload. + +--- +kind: CommandPermissions +version: v1 +definition: + commandName: UploadStorageObjectText + permissions: + - role: admin + allowExecution: true + diff --git a/tests/engine/app/metadata/storage-types.hml b/tests/engine/app/metadata/storage-types.hml new file mode 100644 index 0000000..16a795d --- /dev/null +++ b/tests/engine/app/metadata/storage-types.hml @@ -0,0 +1,806 @@ +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: Boolean + representation: Boolean + graphql: + comparisonExpressionTypeName: BooleanComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: BucketName + graphql: + typeName: BucketName + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: BucketNameBoolExp + operand: + scalar: + type: BucketName + comparisonOperators: + - name: _eq + argumentType: BucketName! + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: BucketName + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: BucketNameBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: BucketName + representation: BucketName + graphql: + comparisonExpressionTypeName: BucketNameComparisonExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: String + representation: String + graphql: + comparisonExpressionTypeName: StringComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: StorageClientId + graphql: + typeName: StorageClientId + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: StorageClientIdBoolExp + operand: + scalar: + type: StorageClientId + comparisonOperators: + - name: _eq + argumentType: StorageClientId!
+ dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: StorageClientID + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: StorageClientIdBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: StorageClientID + representation: StorageClientId + graphql: + comparisonExpressionTypeName: StorageClientIdComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: TimestampTz + graphql: + typeName: TimestampTz + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: TimestampTzBoolExp + operand: + scalar: + type: TimestampTz + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: TimestampTZ + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: TimestampTzBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: TimestampTZ + representation: TimestampTz + graphql: + comparisonExpressionTypeName: TimestampTzComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: FilterTimestamp + graphql: + typeName: FilterTimestamp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: FilterTimestampBoolExp + operand: + scalar: + type: FilterTimestamp + comparisonOperators: + - name: _gt + argumentType: TimestampTz! + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: FilterTimestamp + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: FilterTimestampBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: FilterTimestamp + representation: FilterTimestamp + graphql: + comparisonExpressionTypeName: FilterTimestampComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: Json + graphql: + typeName: Json + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: JsonBoolExp + operand: + scalar: + type: Json + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: JSON + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: JsonBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: JSON + representation: Json + graphql: + comparisonExpressionTypeName: JsonComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: ObjectPath + graphql: + typeName: ObjectPath + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: ObjectPathBoolExp + operand: + scalar: + type: ObjectPath + comparisonOperators: + - name: _starts_with + argumentType: ObjectPath! 
+ dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: ObjectPath + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: ObjectPathBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: ObjectPath + representation: ObjectPath + graphql: + comparisonExpressionTypeName: ObjectPathComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: Int64 + graphql: + typeName: Int64 + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: Int64BoolExp + operand: + scalar: + type: Int64 + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: Int64 + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: Int64BoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: Int64 + representation: Int64 + graphql: + comparisonExpressionTypeName: Int64ComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: Int32 + graphql: + typeName: Int32 + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: Int32BoolExp + operand: + scalar: + type: Int32 + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: Int32 + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: Int32BoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: Int32 + representation: Int32 + graphql: + comparisonExpressionTypeName: Int32ComparisonExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: StringBoolExp + operand: + scalar: + type: String + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: String + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: StringBoolExp + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: BooleanBoolExp + operand: + scalar: + type: Boolean + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: Boolean + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: BooleanBoolExp + +--- +kind: OrderByExpression +version: v1 +definition: + name: BucketNameOrderByExp + operand: + scalar: + orderedType: BucketName + enableOrderByDirections: + enableAll: true + graphql: + expressionTypeName: BucketNameOrderByExp + +--- +kind: OrderByExpression +version: v1 +definition: + name: StringOrderByExp + operand: + scalar: + orderedType: String + enableOrderByDirections: + enableAll: true + graphql: + expressionTypeName: StringOrderByExp + +--- +kind: OrderByExpression +version: v1 +definition: + name: StorageClientIdOrderByExp + operand: + scalar: + orderedType: StorageClientId + enableOrderByDirections: + enableAll: true + graphql: + expressionTypeName: StorageClientIdOrderByExp + +--- +kind: OrderByExpression +version: v1 +definition: + name: TimestampTzOrderByExp + operand: + scalar: + orderedType: TimestampTz + enableOrderByDirections: + enableAll: true + graphql: + expressionTypeName: TimestampTzOrderByExp + +--- +kind: OrderByExpression +version: v1 +definition: + name: 
BooleanOrderByExp + operand: + scalar: + orderedType: Boolean + enableOrderByDirections: + enableAll: true + graphql: + expressionTypeName: BooleanOrderByExp + +--- +kind: OrderByExpression +version: v1 +definition: + name: FilterTimestampOrderByExp + operand: + scalar: + orderedType: FilterTimestamp + enableOrderByDirections: + enableAll: true + graphql: + expressionTypeName: FilterTimestampOrderByExp + +--- +kind: OrderByExpression +version: v1 +definition: + name: JsonOrderByExp + operand: + scalar: + orderedType: Json + enableOrderByDirections: + enableAll: true + graphql: + expressionTypeName: JsonOrderByExp + +--- +kind: OrderByExpression +version: v1 +definition: + name: ObjectPathOrderByExp + operand: + scalar: + orderedType: ObjectPath + enableOrderByDirections: + enableAll: true + graphql: + expressionTypeName: ObjectPathOrderByExp + +--- +kind: OrderByExpression +version: v1 +definition: + name: Int64OrderByExp + operand: + scalar: + orderedType: Int64 + enableOrderByDirections: + enableAll: true + graphql: + expressionTypeName: Int64OrderByExp + +--- +kind: OrderByExpression +version: v1 +definition: + name: Int32OrderByExp + operand: + scalar: + orderedType: Int32 + enableOrderByDirections: + enableAll: true + graphql: + expressionTypeName: Int32OrderByExp + +--- +kind: ScalarType +version: v1 +definition: + name: Bytes + graphql: + typeName: Bytes + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: BytesBoolExp + operand: + scalar: + type: Bytes + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: Bytes + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: BytesBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: Bytes + representation: Bytes + graphql: + comparisonExpressionTypeName: BytesComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: Date + graphql: + typeName: Date + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: DateBoolExp + operand: + scalar: + type: Date + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: Date + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: DateBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: Date + representation: Date + graphql: + comparisonExpressionTypeName: DateComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: StorageReplicationRuleStatus + graphql: + typeName: StorageReplicationRuleStatus + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: StorageReplicationRuleStatusBoolExp + operand: + scalar: + type: StorageReplicationRuleStatus + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: StorageReplicationRuleStatus + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: StorageReplicationRuleStatusBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: StorageReplicationRuleStatus + representation: StorageReplicationRuleStatus + graphql: + comparisonExpressionTypeName: StorageReplicationRuleStatusComparisonExp + +--- +kind: ScalarType +version: v1 
+definition: + name: StorageLegalHoldStatus + graphql: + typeName: StorageLegalHoldStatus + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: StorageLegalHoldStatusBoolExp + operand: + scalar: + type: StorageLegalHoldStatus + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: StorageLegalHoldStatus + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: StorageLegalHoldStatusBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: StorageLegalHoldStatus + representation: StorageLegalHoldStatus + graphql: + comparisonExpressionTypeName: StorageLegalHoldStatusComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: StorageRetentionMode + graphql: + typeName: StorageRetentionMode + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: StorageRetentionModeBoolExp + operand: + scalar: + type: StorageRetentionMode + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: StorageRetentionMode + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: StorageRetentionModeBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: StorageRetentionMode + representation: StorageRetentionMode + graphql: + comparisonExpressionTypeName: StorageRetentionModeComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: StorageRetentionValidityUnit + graphql: + typeName: StorageRetentionValidityUnit + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: StorageRetentionValidityUnitBoolExp + operand: + scalar: + type: StorageRetentionValidityUnit + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: StorageRetentionValidityUnit + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: StorageRetentionValidityUnitBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: StorageRetentionValidityUnit + representation: StorageRetentionValidityUnit + graphql: + comparisonExpressionTypeName: StorageRetentionValidityUnitComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: Duration + graphql: + typeName: Duration + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: DurationBoolExp + operand: + scalar: + type: Duration + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: Duration + operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: DurationBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: Duration + representation: Duration + graphql: + comparisonExpressionTypeName: DurationComparisonExp + +--- +kind: ScalarType +version: v1 +definition: + name: ChecksumType + graphql: + typeName: ChecksumType + +--- +kind: BooleanExpressionType +version: v1 +definition: + name: ChecksumTypeBoolExp + operand: + scalar: + type: ChecksumType + comparisonOperators: [] + dataConnectorOperatorMapping: + - dataConnectorName: storage + dataConnectorScalarType: ChecksumType + 
operatorMapping: {} + logicalOperators: + enable: true + isNull: + enable: true + graphql: + typeName: ChecksumTypeBoolExp + +--- +kind: DataConnectorScalarRepresentation +version: v1 +definition: + dataConnectorName: storage + dataConnectorScalarType: ChecksumType + representation: ChecksumType + graphql: + comparisonExpressionTypeName: ChecksumTypeComparisonExp + diff --git a/tests/engine/app/metadata/storage.hml b/tests/engine/app/metadata/storage.hml new file mode 100644 index 0000000..dc09e58 --- /dev/null +++ b/tests/engine/app/metadata/storage.hml @@ -0,0 +1,2957 @@ +kind: DataConnectorLink +version: v1 +definition: + name: storage + url: + readWriteUrls: + read: + valueFromEnv: APP_STORAGE_READ_URL + write: + valueFromEnv: APP_STORAGE_WRITE_URL + schema: + version: v0.1 + schema: + scalar_types: + Boolean: + representation: + type: boolean + aggregate_functions: {} + comparison_operators: {} + BucketName: + representation: + type: string + aggregate_functions: {} + comparison_operators: + _eq: + type: equal + Bytes: + representation: + type: bytes + aggregate_functions: {} + comparison_operators: {} + ChecksumType: + representation: + type: enum + one_of: + - SHA256 + - SHA1 + - CRC32 + - CRC32C + - CRC64NVME + - FullObjectCRC32 + - FullObjectCRC32C + - None + aggregate_functions: {} + comparison_operators: {} + Date: + representation: + type: date + aggregate_functions: {} + comparison_operators: {} + Duration: + representation: + type: json + aggregate_functions: {} + comparison_operators: {} + FilterTimestamp: + representation: + type: timestamptz + aggregate_functions: {} + comparison_operators: + _gt: + type: custom + argument_type: + type: named + name: TimestampTZ + Int32: + representation: + type: int32 + aggregate_functions: {} + comparison_operators: {} + Int64: + representation: + type: int64 + aggregate_functions: {} + comparison_operators: {} + JSON: + representation: + type: json + aggregate_functions: {} + comparison_operators: {} + ObjectPath: + representation: + type: string + aggregate_functions: {} + comparison_operators: + _starts_with: + type: custom + argument_type: + type: named + name: ObjectPath + StorageClientID: + representation: + type: enum + one_of: + - minio + - s3 + aggregate_functions: {} + comparison_operators: + _eq: + type: equal + StorageLegalHoldStatus: + representation: + type: enum + one_of: + - "ON" + - "OFF" + aggregate_functions: {} + comparison_operators: {} + StorageObjectReplicationStatus: + representation: + type: enum + one_of: + - COMPLETED + - PENDING + - FAILED + - REPLICA + aggregate_functions: {} + comparison_operators: {} + StorageReplicationRuleStatus: + representation: + type: enum + one_of: + - Enabled + - Disabled + aggregate_functions: {} + comparison_operators: {} + StorageRetentionMode: + representation: + type: enum + one_of: + - GOVERNANCE + - COMPLIANCE + aggregate_functions: {} + comparison_operators: {} + StorageRetentionValidityUnit: + representation: + type: enum + one_of: + - DAYS + - YEARS + aggregate_functions: {} + comparison_operators: {} + String: + representation: + type: string + aggregate_functions: {} + comparison_operators: {} + TimestampTZ: + representation: + type: timestamptz + aggregate_functions: {} + comparison_operators: {} + object_types: + AbortIncompleteMultipartUpload: + description: structure, not supported yet on MinIO + fields: + daysAfterInitiation: + type: + type: nullable + underlying_type: + type: named + name: Int32 + BucketLifecycleConfiguration: + description: is a collection of 
lifecycle Rule objects. + fields: + rules: + type: + type: array + element_type: + type: named + name: BucketLifecycleRule + BucketLifecycleRule: + description: represents a single rule in lifecycle configuration + fields: + abortIncompleteMultipartUpload: + type: + type: nullable + underlying_type: + type: named + name: AbortIncompleteMultipartUpload + allVersionsExpiration: + type: + type: nullable + underlying_type: + type: named + name: LifecycleAllVersionsExpiration + delMarkerExpiration: + type: + type: nullable + underlying_type: + type: named + name: LifecycleDelMarkerExpiration + expiration: + type: + type: nullable + underlying_type: + type: named + name: LifecycleExpiration + filter: + type: + type: nullable + underlying_type: + type: named + name: LifecycleFilter + id: + type: + type: named + name: String + noncurrentVersionExpiration: + type: + type: nullable + underlying_type: + type: named + name: LifecycleNoncurrentVersionExpiration + noncurrentVersionTransition: + type: + type: nullable + underlying_type: + type: named + name: LifecycleNoncurrentVersionTransition + prefix: + type: + type: nullable + underlying_type: + type: named + name: String + status: + type: + type: nullable + underlying_type: + type: named + name: String + transition: + type: + type: nullable + underlying_type: + type: named + name: LifecycleTransition + DeleteMarkerReplication: + description: whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html + fields: + status: + type: + type: named + name: StorageReplicationRuleStatus + DeleteReplication: + description: whether versioned deletes are replicated. This is a MinIO specific extension + fields: + status: + type: + type: named + name: StorageReplicationRuleStatus + ExistingObjectReplication: + description: whether existing object replication is enabled + fields: + status: + type: + type: named + name: StorageReplicationRuleStatus + LifecycleAllVersionsExpiration: + description: represents AllVersionsExpiration actions element in an ILM policy + fields: + days: + type: + type: nullable + underlying_type: + type: named + name: Int32 + deleteMarker: + type: + type: nullable + underlying_type: + type: named + name: Boolean + LifecycleDelMarkerExpiration: + description: represents DelMarkerExpiration actions element in an ILM policy + fields: + days: + type: + type: nullable + underlying_type: + type: named + name: Int32 + LifecycleExpiration: + description: expiration details of lifecycle configuration + fields: + date: + type: + type: nullable + underlying_type: + type: named + name: Date + days: + type: + type: nullable + underlying_type: + type: named + name: Int32 + expiredObjectAllVersions: + type: + type: nullable + underlying_type: + type: named + name: Boolean + expiredObjectDeleteMarker: + type: + type: nullable + underlying_type: + type: named + name: Boolean + LifecycleFilter: + description: will be used in selecting rule(s) for lifecycle configuration + fields: + and: + type: + type: nullable + underlying_type: + type: named + name: LifecycleFilterAnd + objectSizeGreaterThan: + type: + type: nullable + underlying_type: + type: named + name: Int64 + objectSizeLessThan: + type: + type: nullable + underlying_type: + type: named + name: Int64 + prefix: + type: + type: nullable + underlying_type: + type: named + name: String + tag: + type: + type: nullable + underlying_type: + type: named + name: StorageTag + LifecycleFilterAnd: + description: the And Rule for LifecycleTag, to be used in 
LifecycleRuleFilter + fields: + objectSizeGreaterThan: + type: + type: nullable + underlying_type: + type: named + name: Int64 + objectSizeLessThan: + type: + type: nullable + underlying_type: + type: named + name: Int64 + prefix: + type: + type: nullable + underlying_type: + type: named + name: String + tags: + type: + type: nullable + underlying_type: + type: array + element_type: + type: named + name: StorageTag + LifecycleNoncurrentVersionExpiration: + description: 'Specifies when noncurrent object versions expire. Upon expiration, the server permanently deletes the noncurrent object versions. Set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that the server delete noncurrent object versions at a specific period in the object''s lifetime.' + fields: + newerNoncurrentVersions: + type: + type: nullable + underlying_type: + type: named + name: Int32 + noncurrentDays: + type: + type: nullable + underlying_type: + type: named + name: Int32 + LifecycleNoncurrentVersionTransition: + description: sets this action to request the server to transition noncurrent object versions to a different storage class at a specific period in the object's lifetime. + fields: + newerNoncurrentVersions: + type: + type: nullable + underlying_type: + type: named + name: Int32 + noncurrentDays: + type: + type: nullable + underlying_type: + type: named + name: Int32 + storageClass: + type: + type: nullable + underlying_type: + type: named + name: String + LifecycleTransition: + description: transition details of lifecycle configuration + fields: + date: + type: + type: nullable + underlying_type: + type: named + name: Date + days: + type: + type: nullable + underlying_type: + type: named + name: Int32 + storageClass: + type: + type: nullable + underlying_type: + type: named + name: String + ListStorageObjectsOptions: + description: holds all options of a list object request. + fields: + bucket: + type: + type: nullable + underlying_type: + type: named + name: String + clientId: + type: + type: nullable + underlying_type: + type: named + name: StorageClientID + maxKeys: + type: + type: named + name: Int32 + prefix: + type: + type: named + name: String + recursive: + type: + type: named + name: Boolean + startAfter: + type: + type: named + name: String + withMetadata: + type: + type: named + name: Boolean + withVersions: + type: + type: named + name: Boolean + NotificationCommonConfig: + description: 'represents one single notification configuration such as topic, queue or lambda configuration.' + fields: + arn: + type: + type: nullable + underlying_type: + type: named + name: String + event: + type: + type: array + element_type: + type: named + name: String + filter: + type: + type: nullable + underlying_type: + type: named + name: NotificationFilter + id: + type: + type: nullable + underlying_type: + type: named + name: String + NotificationConfig: + description: the struct that represents a notification configuration object.
+ fields: + cloudFunctionConfigurations: + type: + type: array + element_type: + type: named + name: NotificationLambdaConfig + queueConfigurations: + type: + type: array + element_type: + type: named + name: NotificationQueueConfig + topicConfigurations: + type: + type: array + element_type: + type: named + name: NotificationTopicConfig + NotificationFilter: + description: 'a tag in the notification XML structure which carries suffix/prefix filters' + fields: + s3Key: + type: + type: nullable + underlying_type: + type: named + name: NotificationS3Key + NotificationFilterRule: + description: child of S3Key, a tag in the notification XML which carries suffix/prefix filters + fields: + name: + type: + type: named + name: String + value: + type: + type: named + name: String + NotificationLambdaConfig: + description: carries one single cloudfunction notification configuration + fields: + arn: + type: + type: nullable + underlying_type: + type: named + name: String + cloudFunction: + type: + type: named + name: String + event: + type: + type: array + element_type: + type: named + name: String + filter: + type: + type: nullable + underlying_type: + type: named + name: NotificationFilter + id: + type: + type: nullable + underlying_type: + type: named + name: String + NotificationQueueConfig: + description: carries one single queue notification configuration + fields: + arn: + type: + type: nullable + underlying_type: + type: named + name: String + event: + type: + type: array + element_type: + type: named + name: String + filter: + type: + type: nullable + underlying_type: + type: named + name: NotificationFilter + id: + type: + type: nullable + underlying_type: + type: named + name: String + queue: + type: + type: named + name: String + NotificationS3Key: + description: child of Filter, a tag in the notification XML which carries suffix/prefix filters + fields: + filterRule: + type: + type: nullable + underlying_type: + type: array + element_type: + type: named + name: NotificationFilterRule + NotificationTopicConfig: + description: carries one single topic notification configuration + fields: + arn: + type: + type: nullable + underlying_type: + type: named + name: String + event: + type: + type: array + element_type: + type: named + name: String + filter: + type: + type: nullable + underlying_type: + type: named + name: NotificationFilter + id: + type: + type: nullable + underlying_type: + type: named + name: String + topic: + type: + type: named + name: String + PresignedURLResponse: + description: holds the presigned URL and expiry information. + fields: + expiredAt: + type: + type: named + name: String + url: + type: + type: named + name: String + PutStorageObjectArguments: + description: represents input arguments of the PutObject method. + fields: + bucket: + type: + type: nullable + underlying_type: + type: named + name: String + clientId: + type: + type: nullable + underlying_type: + type: named + name: StorageClientID + object: + type: + type: named + name: String + options: + type: + type: nullable + underlying_type: + type: named + name: PutStorageObjectOptions + PutStorageObjectOptions: + description: represents options specified by the user for the PutObject call.
+ fields: + autoChecksum: + type: + type: nullable + underlying_type: + type: named + name: ChecksumType + cacheControl: + type: + type: nullable + underlying_type: + type: named + name: String + checksum: + type: + type: nullable + underlying_type: + type: named + name: ChecksumType + concurrentStreamParts: + type: + type: nullable + underlying_type: + type: named + name: Boolean + contentDisposition: + type: + type: nullable + underlying_type: + type: named + name: String + contentEncoding: + type: + type: nullable + underlying_type: + type: named + name: String + contentLanguage: + type: + type: nullable + underlying_type: + type: named + name: String + contentType: + type: + type: nullable + underlying_type: + type: named + name: String + disableContentSha256: + type: + type: nullable + underlying_type: + type: named + name: Boolean + disableMultipart: + type: + type: nullable + underlying_type: + type: named + name: Boolean + expires: + type: + type: nullable + underlying_type: + type: named + name: TimestampTZ + legalHold: + type: + type: nullable + underlying_type: + type: named + name: StorageLegalHoldStatus + mode: + type: + type: nullable + underlying_type: + type: named + name: StorageRetentionMode + numThreads: + type: + type: nullable + underlying_type: + type: named + name: Int32 + partSize: + type: + type: nullable + underlying_type: + type: named + name: Int64 + retainUntilDate: + type: + type: nullable + underlying_type: + type: named + name: TimestampTZ + sendContentMd5: + type: + type: nullable + underlying_type: + type: named + name: Boolean + storageClass: + type: + type: nullable + underlying_type: + type: named + name: String + userMetadata: + type: + type: nullable + underlying_type: + type: named + name: JSON + userTags: + type: + type: nullable + underlying_type: + type: named + name: JSON + websiteRedirectLocation: + type: + type: nullable + underlying_type: + type: named + name: String + RemoveStorageObjectError: + description: the container of Multi Delete S3 API error. + fields: + error: + type: + type: nullable + underlying_type: + type: named + name: JSON + objectName: + type: + type: named + name: String + versionId: + type: + type: named + name: String + ReplicaModifications: + description: specifies if replica modification sync is enabled + fields: + status: + type: + type: named + name: StorageReplicationRuleStatus + ServerSideEncryptionConfiguration: + description: is the default encryption configuration structure. + fields: + rules: + type: + type: array + element_type: + type: named + name: ServerSideEncryptionRule + ServerSideEncryptionRule: + description: rule layer encapsulates default encryption configuration + fields: + apply: + type: + type: named + name: StorageApplySSEByDefault + SetStorageObjectLockConfig: + description: represents the object lock configuration options in given bucket + fields: + mode: + type: + type: nullable + underlying_type: + type: named + name: StorageRetentionMode + unit: + type: + type: nullable + underlying_type: + type: named + name: StorageRetentionValidityUnit + validity: + type: + type: nullable + underlying_type: + type: named + name: Int32 + SourceSelectionCriteria: + description: specifies additional source selection criteria in ReplicationConfiguration. + fields: + replicaModifications: + type: + type: nullable + underlying_type: + type: named + name: ReplicaModifications + StorageApplySSEByDefault: + description: defines default encryption configuration, KMS or SSE. 
To activate KMS, SSEAlgorithm needs to be set to `aws:kms`. MinIO currently does not support KMS. + fields: + kmsMasterKeyId: + type: + type: nullable + underlying_type: + type: named + name: String + sseAlgorithm: + type: + type: named + name: String + StorageBucketArguments: + description: represents the common input arguments for bucket-related methods. + fields: + bucket: + type: + type: nullable + underlying_type: + type: named + name: String + clientId: + type: + type: nullable + underlying_type: + type: named + name: StorageClientID + StorageBucketInfo: + description: container for bucket metadata. + fields: + creationDate: + type: + type: named + name: TimestampTZ + name: + type: + type: named + name: String + StorageBucketVersioningConfiguration: + description: is the versioning configuration structure + fields: + excludeFolders: + type: + type: nullable + underlying_type: + type: named + name: Boolean + excludedPrefixes: + type: + type: nullable + underlying_type: + type: array + element_type: + type: named + name: String + mfaDelete: + type: + type: nullable + underlying_type: + type: named + name: String + status: + type: + type: nullable + underlying_type: + type: named + name: String + StorageCopyDestOptions: + description: represents options specified by the user for the CopyObject/ComposeObject APIs. + fields: + bucket: + type: + type: nullable + underlying_type: + type: named + name: String + legalHold: + type: + type: nullable + underlying_type: + type: named + name: StorageLegalHoldStatus + mode: + type: + type: nullable + underlying_type: + type: named + name: StorageRetentionMode + object: + type: + type: named + name: String + replaceMetadata: + type: + type: nullable + underlying_type: + type: named + name: Boolean + replaceTags: + type: + type: nullable + underlying_type: + type: named + name: Boolean + retainUntilDate: + type: + type: nullable + underlying_type: + type: named + name: TimestampTZ + size: + type: + type: nullable + underlying_type: + type: named + name: Int64 + userMetadata: + type: + type: nullable + underlying_type: + type: named + name: JSON + userTags: + type: + type: nullable + underlying_type: + type: named + name: JSON + StorageCopySrcOptions: + description: represents a source object to be copied, using server-side copying APIs. + fields: + bucket: + type: + type: nullable + underlying_type: + type: named + name: String + end: + type: + type: nullable + underlying_type: + type: named + name: Int64 + matchETag: + type: + type: nullable + underlying_type: + type: named + name: String + matchModifiedSince: + type: + type: nullable + underlying_type: + type: named + name: TimestampTZ + matchRange: + type: + type: nullable + underlying_type: + type: named + name: Boolean + matchUnmodifiedSince: + type: + type: nullable + underlying_type: + type: named + name: TimestampTZ + noMatchETag: + type: + type: nullable + underlying_type: + type: named + name: String + object: + type: + type: named + name: String + start: + type: + type: nullable + underlying_type: + type: named + name: Int64 + versionId: + type: + type: nullable + underlying_type: + type: named + name: String + StorageGrant: + description: holds grant information. + fields: + grantee: + type: + type: nullable + underlying_type: + type: named + name: StorageGrantee + permission: + type: + type: nullable + underlying_type: + type: named + name: String + StorageGrantee: + description: represents the person being granted permissions.
+ fields: + displayName: + type: + type: nullable + underlying_type: + type: named + name: String + id: + type: + type: nullable + underlying_type: + type: named + name: String + uri: + type: + type: nullable + underlying_type: + type: named + name: String + StorageObject: + fields: + bucket: + type: + type: named + name: BucketName + checksumCrc32: + type: + type: nullable + underlying_type: + type: named + name: String + checksumCrc32C: + type: + type: nullable + underlying_type: + type: named + name: String + checksumCrc64Nvme: + type: + type: nullable + underlying_type: + type: named + name: String + checksumSha1: + type: + type: nullable + underlying_type: + type: named + name: String + checksumSha256: + type: + type: nullable + underlying_type: + type: named + name: String + clientId: + type: + type: named + name: StorageClientID + contentType: + type: + type: named + name: String + etag: + type: + type: named + name: String + expiration: + type: + type: nullable + underlying_type: + type: named + name: TimestampTZ + expirationRuleId: + type: + type: nullable + underlying_type: + type: named + name: String + expires: + type: + type: named + name: TimestampTZ + grant: + type: + type: nullable + underlying_type: + type: array + element_type: + type: named + name: StorageGrant + isDeleteMarker: + type: + type: nullable + underlying_type: + type: named + name: Boolean + isLatest: + type: + type: nullable + underlying_type: + type: named + name: Boolean + lastModified: + type: + type: named + name: FilterTimestamp + metadata: + type: + type: nullable + underlying_type: + type: named + name: JSON + name: + type: + type: named + name: ObjectPath + owner: + type: + type: nullable + underlying_type: + type: named + name: StorageOwner + replicationReady: + type: + type: nullable + underlying_type: + type: named + name: Boolean + replicationStatus: + type: + type: nullable + underlying_type: + type: named + name: String + restore: + type: + type: nullable + underlying_type: + type: named + name: StorageRestoreInfo + size: + type: + type: named + name: Int64 + storageClass: + type: + type: nullable + underlying_type: + type: named + name: String + userMetadata: + type: + type: nullable + underlying_type: + type: named + name: JSON + userTagCount: + type: + type: nullable + underlying_type: + type: named + name: Int32 + userTags: + type: + type: nullable + underlying_type: + type: named + name: JSON + versionId: + type: + type: nullable + underlying_type: + type: named + name: String + StorageObjectAttributePart: + fields: + checksumCrc32: + type: + type: nullable + underlying_type: + type: named + name: String + checksumCrc32C: + type: + type: nullable + underlying_type: + type: named + name: String + checksumCrc64Nvme: + type: + type: nullable + underlying_type: + type: named + name: String + checksumSha1: + type: + type: nullable + underlying_type: + type: named + name: String + checksumSha256: + type: + type: nullable + underlying_type: + type: named + name: String + partNumber: + type: + type: named + name: Int32 + size: + type: + type: named + name: Int32 + StorageObjectAttributes: + description: is the response object returned by the GetObjectAttributes API. 
+ fields: + checksum: + type: + type: named + name: StorageObjectChecksum + etag: + type: + type: nullable + underlying_type: + type: named + name: String + lastModified: + type: + type: named + name: TimestampTZ + objectParts: + type: + type: named + name: StorageObjectParts + objectSize: + type: + type: named + name: Int32 + storageClass: + type: + type: named + name: String + versionId: + type: + type: nullable + underlying_type: + type: named + name: String + StorageObjectAttributesResponse: + description: contains details returned by the GetObjectAttributes API. + fields: + checksum: + type: + type: named + name: StorageObjectChecksum + etag: + type: + type: nullable + underlying_type: + type: named + name: String + objectParts: + type: + type: named + name: StorageObjectParts + objectSize: + type: + type: named + name: Int32 + storageClass: + type: + type: named + name: String + StorageObjectChecksum: + description: represents checksum values of the object. + fields: + checksumCrc32: + type: + type: nullable + underlying_type: + type: named + name: String + checksumCrc32C: + type: + type: nullable + underlying_type: + type: named + name: String + checksumCrc64Nvme: + type: + type: nullable + underlying_type: + type: named + name: String + checksumSha1: + type: + type: nullable + underlying_type: + type: named + name: String + checksumSha256: + type: + type: nullable + underlying_type: + type: named + name: String + StorageObjectLockConfig: + fields: + mode: + type: + type: nullable + underlying_type: + type: named + name: StorageRetentionMode + objectLock: + type: + type: named + name: String + unit: + type: + type: nullable + underlying_type: + type: named + name: StorageRetentionValidityUnit + validity: + type: + type: nullable + underlying_type: + type: named + name: Int32 + StorageObjectMultipartInfo: + description: container for multipart object metadata. + fields: + initiated: + type: + type: nullable + underlying_type: + type: named + name: TimestampTZ + key: + type: + type: nullable + underlying_type: + type: named + name: String + size: + type: + type: nullable + underlying_type: + type: named + name: Int64 + storageClass: + type: + type: nullable + underlying_type: + type: named + name: String + uploadId: + type: + type: nullable + underlying_type: + type: named + name: String + StorageObjectParts: + fields: + isTruncated: + type: + type: named + name: Boolean + maxParts: + type: + type: named + name: Int32 + nextPartNumberMarker: + type: + type: named + name: Int32 + partNumberMarker: + type: + type: named + name: Int32 + parts: + type: + type: array + element_type: + type: nullable + underlying_type: + type: named + name: StorageObjectAttributePart + partsCount: + type: + type: named + name: Int32 + StorageOwner: + description: holds the name and ID of the object owner.
+ fields: + id: + type: + type: nullable + underlying_type: + type: named + name: String + name: + type: + type: nullable + underlying_type: + type: named + name: String + StorageReplicationConfig: + description: replication configuration specified in https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html + fields: + role: + type: + type: nullable + underlying_type: + type: named + name: String + rules: + type: + type: array + element_type: + type: named + name: StorageReplicationRule + StorageReplicationDestination: + fields: + bucket: + type: + type: named + name: String + storageClass: + type: + type: nullable + underlying_type: + type: named + name: String + StorageReplicationFilter: + description: a filter for a replication configuration Rule. + fields: + and: + type: + type: nullable + underlying_type: + type: named + name: StorageReplicationFilterAnd + prefix: + type: + type: nullable + underlying_type: + type: named + name: String + tag: + type: + type: nullable + underlying_type: + type: named + name: StorageTag + StorageReplicationFilterAnd: + description: 'a tag to combine a prefix and multiple tags for a replication configuration rule.' + fields: + prefix: + type: + type: nullable + underlying_type: + type: named + name: String + tag: + type: + type: nullable + underlying_type: + type: array + element_type: + type: named + name: StorageTag + StorageReplicationRule: + description: a rule for replication configuration. + fields: + deleteMarkerReplication: + type: + type: nullable + underlying_type: + type: named + name: DeleteMarkerReplication + deleteReplication: + type: + type: nullable + underlying_type: + type: named + name: DeleteReplication + destination: + type: + type: nullable + underlying_type: + type: named + name: StorageReplicationDestination + existingObjectReplication: + type: + type: nullable + underlying_type: + type: named + name: ExistingObjectReplication + filter: + type: + type: named + name: StorageReplicationFilter + id: + type: + type: nullable + underlying_type: + type: named + name: String + priority: + type: + type: named + name: Int32 + sourceSelectionCriteria: + type: + type: nullable + underlying_type: + type: named + name: SourceSelectionCriteria + status: + type: + type: named + name: StorageReplicationRuleStatus + StorageRestoreInfo: + description: contains information of the restore operation of an archived object. + fields: + expiryTime: + type: + type: nullable + underlying_type: + type: named + name: TimestampTZ + ongoingRestore: + type: + type: named + name: Boolean + StorageTag: + description: a key/value pair representing an object tag used in configuration rules + fields: + key: + type: + type: nullable + underlying_type: + type: named + name: String + value: + type: + type: nullable + underlying_type: + type: named + name: String + StorageUploadInfo: + description: represents the information of the uploaded object.
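Since StorageUploadInfo (whose fields follow) is the payload returned by the upload procedures registered earlier in this patch, a hypothetical invocation of uploadStorageObject is sketched here. This is illustrative only: the object name and options are made up, and the `data` value is assumed to be base64-encoded content, since the Bytes scalar is declared with a bytes representation.

```graphql
# Illustrative only: uploads a small text file. "SGVsbG8sIHdvcmxkIQ==" is
# base64 for "Hello, world!"; the selected fields come from StorageUploadInfo.
mutation UploadHello {
  uploadStorageObject(
    object: "hello.txt"
    data: "SGVsbG8sIHdvcmxkIQ=="
    options: { contentType: "text/plain" }
  ) {
    bucket
    name
    size
    etag
    versionId
  }
}
```

When `bucket` and `clientId` are omitted, the connector is presumably expected to fall back to its configured defaults, since both arguments are nullable throughout this schema.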
+ fields: + bucket: + type: + type: named + name: String + checksumCrc32: + type: + type: nullable + underlying_type: + type: named + name: String + checksumCrc32C: + type: + type: nullable + underlying_type: + type: named + name: String + checksumCrc64Nvme: + type: + type: nullable + underlying_type: + type: named + name: String + checksumSha1: + type: + type: nullable + underlying_type: + type: named + name: String + checksumSha256: + type: + type: nullable + underlying_type: + type: named + name: String + etag: + type: + type: named + name: String + expiration: + type: + type: nullable + underlying_type: + type: named + name: TimestampTZ + expirationRuleId: + type: + type: nullable + underlying_type: + type: named + name: String + lastModified: + type: + type: nullable + underlying_type: + type: named + name: TimestampTZ + location: + type: + type: nullable + underlying_type: + type: named + name: String + name: + type: + type: named + name: String + size: + type: + type: named + name: Int64 + versionId: + type: + type: nullable + underlying_type: + type: named + name: String + collections: + - name: storageObjects + description: The information of a storage object + arguments: + recursive: + type: + type: nullable + underlying_type: + type: named + name: Boolean + type: StorageObject + uniqueness_constraints: {} + foreign_keys: {} + functions: + - name: downloadStorageObject + description: returns a stream of the object data. Most of the common errors occur when reading the stream. + arguments: + bucket: + type: + type: nullable + underlying_type: + type: named + name: String + checksum: + type: + type: nullable + underlying_type: + type: named + name: Boolean + clientId: + type: + type: nullable + underlying_type: + type: named + name: StorageClientID + headers: + type: + type: nullable + underlying_type: + type: named + name: JSON + object: + type: + type: named + name: String + partNumber: + type: + type: nullable + underlying_type: + type: named + name: Int32 + requestParams: + type: + type: nullable + underlying_type: + type: named + name: JSON + versionId: + type: + type: nullable + underlying_type: + type: named + name: String + result_type: + type: nullable + underlying_type: + type: named + name: Bytes + - name: downloadStorageObjectText + description: returns the object content in plain text. Use this function only if you know the object is a text file. + arguments: + bucket: + type: + type: nullable + underlying_type: + type: named + name: String + checksum: + type: + type: nullable + underlying_type: + type: named + name: Boolean + clientId: + type: + type: nullable + underlying_type: + type: named + name: StorageClientID + headers: + type: + type: nullable + underlying_type: + type: named + name: JSON + object: + type: + type: named + name: String + partNumber: + type: + type: nullable + underlying_type: + type: named + name: Int32 + requestParams: + type: + type: nullable + underlying_type: + type: named + name: JSON + versionId: + type: + type: nullable + underlying_type: + type: named + name: String + result_type: + type: nullable + underlying_type: + type: named + name: String + - name: storageBucketEncryption + description: gets default encryption configuration set on a bucket.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: nullable
+      underlying_type:
+        type: named
+        name: ServerSideEncryptionConfiguration
+  - name: storageBucketExists
+    description: checks if a bucket exists.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: named
+      name: Boolean
+  - name: storageBucketLifecycle
+    description: gets lifecycle on a bucket or a prefix.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: nullable
+      underlying_type:
+        type: named
+        name: BucketLifecycleConfiguration
+  - name: storageBucketNotification
+    description: gets notification configuration on a bucket.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: nullable
+      underlying_type:
+        type: named
+        name: NotificationConfig
+  - name: storageBucketPolicy
+    description: gets access permissions on a bucket or a prefix.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: named
+      name: String
+  - name: storageBucketReplication
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: nullable
+      underlying_type:
+        type: named
+        name: StorageReplicationConfig
+  - name: storageBucketTags
+    description: gets tags of a bucket.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: named
+      name: JSON
+  - name: storageBucketVersioning
+    description: gets the versioning configuration set on a bucket.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: nullable
+      underlying_type:
+        type: named
+        name: StorageBucketVersioningConfiguration
+  - name: storageBuckets
+    description: lists all buckets.
+    arguments:
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: array
+      element_type:
+        type: named
+        name: StorageBucketInfo
+  - name: storageIncompleteUploads
+    description: lists partially uploaded objects in a bucket.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      prefix:
+        type:
+          type: named
+          name: String
+      recursive:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Boolean
+    result_type:
+      type: array
+      element_type:
+        type: named
+        name: StorageObjectMultipartInfo
+  - name: storageObject
+    description: fetches metadata of an object.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      checksum:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Boolean
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      headers:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: JSON
+      object:
+        type:
+          type: named
+          name: String
+      partNumber:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Int32
+      requestParams:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: JSON
+      versionId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+    result_type:
+      type: nullable
+      underlying_type:
+        type: named
+        name: StorageObject
+  - name: storageObjectAttributes
+    description: returns the attributes of an object.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      maxParts:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Int32
+      object:
+        type:
+          type: named
+          name: String
+      partNumberMarker:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Int32
+      versionId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+    result_type:
+      type: nullable
+      underlying_type:
+        type: named
+        name: StorageObjectAttributes
+  - name: storageObjectLegalHold
+    description: returns the legal-hold status of a given object.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      object:
+        type:
+          type: named
+          name: String
+      versionId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+    result_type:
+      type: named
+      name: StorageLegalHoldStatus
+  - name: storageObjectLockConfig
+    description: gets the object lock configuration of a given bucket.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: nullable
+      underlying_type:
+        type: named
+        name: StorageObjectLockConfig
+  - name: storageObjectTags
+    description: fetches the tags of the given object.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      object:
+        type:
+          type: named
+          name: String
+      versionId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+    result_type:
+      type: named
+      name: JSON
+  - name: storagePresignedDownloadUrl
+    description: generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The maximum expiry is 604800 seconds (i.e. 7 days) and the minimum is 1 second.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      expiry:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Duration
+      object:
+        type:
+          type: named
+          name: String
+      requestParams:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: JSON
+    result_type:
+      type: named
+      name: PresignedURLResponse
+  - name: storagePresignedHeadUrl
+    description: generates a presigned URL for HTTP HEAD operations. Browsers/Mobile clients may point to this URL to directly get metadata from objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      expiry:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Duration
+      object:
+        type:
+          type: named
+          name: String
+      requestParams:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: JSON
+    result_type:
+      type: named
+      name: PresignedURLResponse
+  - name: storagePresignedUploadUrl
+    description: generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      expiry:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Duration
+      object:
+        type:
+          type: named
+          name: String
+    result_type:
+      type: named
+      name: PresignedURLResponse
+procedures:
+  - name: composeStorageObject
+    description: creates an object by concatenating a list of source objects using server-side copying.
+    arguments:
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      dest:
+        type:
+          type: named
+          name: StorageCopyDestOptions
+      sources:
+        type:
+          type: array
+          element_type:
+            type: named
+            name: StorageCopySrcOptions
+    result_type:
+      type: named
+      name: StorageUploadInfo
+  - name: copyStorageObject
+    description: creates or replaces an object through server-side copying of an existing object. It supports conditional copying, copying a part of an object, server-side encryption of the destination, and decryption of the source. To copy multiple source objects into a single destination object, see the ComposeObject API.
+    arguments:
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      dest:
+        type:
+          type: named
+          name: StorageCopyDestOptions
+      source:
+        type:
+          type: named
+          name: StorageCopySrcOptions
+    result_type:
+      type: named
+      name: StorageUploadInfo
+  - name: createStorageBucket
+    description: creates a new bucket.
+    arguments:
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      name:
+        type:
+          type: named
+          name: String
+      objectLocking:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Boolean
+      region:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+    result_type:
+      type: named
+      name: Boolean
+  - name: enableStorageBucketVersioning
+    description: enables bucket versioning support.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: named
+      name: Boolean
+  - name: putStorageObjectLegalHold
+    description: applies a legal hold to an object.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      object:
+        type:
+          type: named
+          name: String
+      status:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageLegalHoldStatus
+      versionId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+    result_type:
+      type: named
+      name: Boolean
+  - name: putStorageObjectRetention
+    description: applies an object retention lock to an object.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      governanceBypass:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Boolean
+      mode:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageRetentionMode
+      object:
+        type:
+          type: named
+          name: String
+      retainUntilDate:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: TimestampTZ
+      versionId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+    result_type:
+      type: named
+      name: Boolean
+  - name: putStorageObjectTags
+    description: sets new tags on the given object, replacing/overwriting any existing tags.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      object:
+        type:
+          type: named
+          name: String
+      tags:
+        type:
+          type: named
+          name: JSON
+      versionId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+    result_type:
+      type: named
+      name: Boolean
+  - name: removeIncompleteStorageUpload
+    description: removes a partially uploaded object.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      object:
+        type:
+          type: named
+          name: String
+    result_type:
+      type: named
+      name: Boolean
+  - name: removeStorageBucket
+    description: removes a bucket; the bucket must be empty to be removed successfully.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: named
+      name: Boolean
+  - name: removeStorageBucketReplication
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: named
+      name: Boolean
+  - name: removeStorageBucketTags
+    description: removes all tags on a bucket.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: named
+      name: Boolean
+  - name: removeStorageObject
+    description: removes an object with the specified options.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      forceDelete:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Boolean
+      governanceBypass:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Boolean
+      object:
+        type:
+          type: named
+          name: String
+      versionId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+    result_type:
+      type: named
+      name: Boolean
+  - name: removeStorageObjectTags
+    description: removes tags from the given object.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      object:
+        type:
+          type: named
+          name: String
+      versionId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+    result_type:
+      type: named
+      name: Boolean
+  - name: removeStorageObjects
+    description: removes a list of objects obtained from an input channel. The call sends delete requests to the server, up to 1000 objects at a time. Any errors observed are sent over the error channel.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      governanceBypass:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Boolean
+      maxKeys:
+        type:
+          type: named
+          name: Int32
+      prefix:
+        type:
+          type: named
+          name: String
+      recursive:
+        type:
+          type: named
+          name: Boolean
+      startAfter:
+        type:
+          type: named
+          name: String
+      withMetadata:
+        type:
+          type: named
+          name: Boolean
+      withVersions:
+        type:
+          type: named
+          name: Boolean
+    result_type:
+      type: array
+      element_type:
+        type: named
+        name: RemoveStorageObjectError
+  - name: setStorageBucketEncryption
+    description: sets the default encryption configuration on a bucket.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      rules:
+        type:
+          type: array
+          element_type:
+            type: named
+            name: ServerSideEncryptionRule
+    result_type:
+      type: named
+      name: Boolean
+  - name: setStorageBucketLifecycle
+    description: sets lifecycle on a bucket or an object prefix.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      rules:
+        type:
+          type: array
+          element_type:
+            type: named
+            name: BucketLifecycleRule
+    result_type:
+      type: named
+      name: Boolean
+  - name: setStorageBucketNotification
+    description: sets a new notification configuration on a bucket.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      cloudFunctionConfigurations:
+        type:
+          type: array
+          element_type:
+            type: named
+            name: NotificationLambdaConfig
+      queueConfigurations:
+        type:
+          type: array
+          element_type:
+            type: named
+            name: NotificationQueueConfig
+      topicConfigurations:
+        type:
+          type: array
+          element_type:
+            type: named
+            name: NotificationTopicConfig
+    result_type:
+      type: named
+      name: Boolean
+  - name: setStorageBucketReplication
+    description: sets replication configuration on a bucket. The role can be obtained by first defining the replication target on MinIO to associate the source and destination buckets for replication with the replication endpoint.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      role:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      rules:
+        type:
+          type: array
+          element_type:
+            type: named
+            name: StorageReplicationRule
+    result_type:
+      type: named
+      name: Boolean
+  - name: setStorageBucketTags
+    description: sets tags on a bucket.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      tags:
+        type:
+          type: named
+          name: JSON
+    result_type:
+      type: named
+      name: Boolean
+  - name: setStorageObjectLockConfig
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      mode:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageRetentionMode
+      unit:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageRetentionValidityUnit
+      validity:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: Int32
+    result_type:
+      type: named
+      name: Boolean
+  - name: suspendStorageBucketVersioning
+    description: disables bucket versioning support.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+    result_type:
+      type: named
+      name: Boolean
+  - name: uploadStorageObject
+    description: uploads objects that are less than 128MiB in a single PUT operation. For objects that are greater than 128MiB in size, PutObject seamlessly uploads the object in parts of 128MiB or more depending on the actual file size. The maximum upload size for an object is 5TB.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      data:
+        type:
+          type: named
+          name: Bytes
+      object:
+        type:
+          type: named
+          name: String
+      options:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: PutStorageObjectOptions
+    result_type:
+      type: named
+      name: StorageUploadInfo
+  - name: uploadStorageObjectText
+    description: uploads an object as plain text to the storage server. The file content is not base64-encoded, so the request payload is roughly 30% smaller.
+    arguments:
+      bucket:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: String
+      clientId:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: StorageClientID
+      data:
+        type:
+          type: named
+          name: String
+      object:
+        type:
+          type: named
+          name: String
+      options:
+        type:
+          type: nullable
+          underlying_type:
+            type: named
+            name: PutStorageObjectOptions
+    result_type:
+      type: named
+      name: StorageUploadInfo
+capabilities:
+  version: 0.1.6
+  capabilities:
+    query:
+      variables: {}
+      nested_fields: {}
+      exists: {}
+    mutation: {}
diff --git a/tests/engine/app/subgraph.yaml b/tests/engine/app/subgraph.yaml
new file mode 100644
index 0000000..5c5b149
--- /dev/null
+++ b/tests/engine/app/subgraph.yaml
@@ -0,0 +1,14 @@
+kind: Subgraph
+version: v2
+definition:
+  name: app
+  generator:
+    rootPath: .
+    namingConvention: graphql
+  includePaths:
+    - metadata
+  envMapping:
+    APP_STORAGE_READ_URL:
+      fromEnv: APP_STORAGE_READ_URL
+    APP_STORAGE_WRITE_URL:
+      fromEnv: APP_STORAGE_WRITE_URL
diff --git a/tests/engine/compose.yaml b/tests/engine/compose.yaml
new file mode 100644
index 0000000..d671dd4
--- /dev/null
+++ b/tests/engine/compose.yaml
@@ -0,0 +1,39 @@
+services:
+  engine:
+    build:
+      context: engine
+      dockerfile: Dockerfile.engine
+      pull: true
+    environment:
+      AUTHN_CONFIG_PATH: /md/auth_config.json
+      ENABLE_CORS: "true"
+      ENABLE_SQL_INTERFACE: "true"
+      INTROSPECTION_METADATA_FILE: /md/metadata.json
+      METADATA_PATH: /md/open_dd.json
+      OTLP_ENDPOINT: http://local.hasura.dev:4317
+    extra_hosts:
+      - local.hasura.dev:host-gateway
+    labels:
+      io.hasura.ddn.service-name: engine
+    ports:
+      - 3280:3000
+
+  otel-collector:
+    command:
+      - --config=/etc/otel-collector-config.yaml
+    environment:
+      HASURA_DDN_PAT: ${HASURA_DDN_PAT:-test}
+    image: otel/opentelemetry-collector:0.104.0
+    ports:
+      - 4317:4317
+      - 4318:4318
+    volumes:
+      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
+
+  jaeger:
+    image: jaegertracing/all-in-one:1.60
+    restart: always
+    ports:
+      - 16686:16686
+    environment:
+      COLLECTOR_OTLP_ENABLED: "true"
diff --git a/tests/engine/engine/Dockerfile.engine b/tests/engine/engine/Dockerfile.engine
new file mode 100644
index 0000000..3613f0e
--- /dev/null
+++ b/tests/engine/engine/Dockerfile.engine
@@ -0,0 +1,2 @@
+FROM ghcr.io/hasura/v3-engine
+COPY ./build /md/
\ No newline at end of file
diff --git a/tests/engine/globals/metadata/auth-config.hml b/tests/engine/globals/metadata/auth-config.hml
new file mode 100644
index 0000000..54c0b84
--- /dev/null
+++ b/tests/engine/globals/metadata/auth-config.hml
@@ -0,0 +1,7 @@
+kind: AuthConfig
+version: v2
+definition:
+  mode:
+    noAuth:
+      role: admin
+      sessionVariables: {}
diff --git a/tests/engine/globals/metadata/compatibility-config.hml b/tests/engine/globals/metadata/compatibility-config.hml
new file mode 100644
index 0000000..62ee84c
--- /dev/null
+++ b/tests/engine/globals/metadata/compatibility-config.hml
@@ -0,0 +1,2 @@
+kind: CompatibilityConfig
+date: "2024-12-18"
diff --git a/tests/engine/globals/metadata/graphql-config.hml b/tests/engine/globals/metadata/graphql-config.hml
new file mode 100644
index 0000000..f54210c
--- /dev/null
+++ b/tests/engine/globals/metadata/graphql-config.hml
@@ -0,0 +1,36 @@
+kind: GraphqlConfig
+version: v1
+definition:
+  query:
+    rootOperationTypeName: Query
+    argumentsInput:
+      fieldName: args
+    limitInput:
+      fieldName: limit
+    offsetInput:
+      fieldName: offset
+    filterInput:
+      fieldName: where
+      operatorNames:
+        and: _and
+        or: _or
+        not: _not
+        isNull: _is_null
+    orderByInput:
+      fieldName: order_by
+      enumDirectionValues:
+        asc: Asc
+        desc: Desc
+      enumTypeNames:
+        - directions:
+            - Asc
+            - Desc
+          typeName: OrderBy
+    aggregate:
+      filterInputFieldName: filter_input
+      countFieldName: _count
+      countDistinctFieldName: _count_distinct
+  mutation:
+    rootOperationTypeName: Mutation
+  subscription:
+    rootOperationTypeName: Subscription
diff --git a/tests/engine/globals/subgraph.yaml b/tests/engine/globals/subgraph.yaml
new file mode 100644
index 0000000..b21faca
--- /dev/null
+++ b/tests/engine/globals/subgraph.yaml
@@ -0,0 +1,8 @@
+kind: Subgraph
+version: v2
+definition:
+  name: globals
+  generator:
+    rootPath: .
+  includePaths:
+    - metadata
diff --git a/tests/engine/hasura.yaml b/tests/engine/hasura.yaml
new file mode 100644
index 0000000..7f8f5cc
--- /dev/null
+++ b/tests/engine/hasura.yaml
@@ -0,0 +1 @@
+version: v3
diff --git a/tests/engine/otel-collector-config.yaml b/tests/engine/otel-collector-config.yaml
new file mode 100644
index 0000000..555a1ca
--- /dev/null
+++ b/tests/engine/otel-collector-config.yaml
@@ -0,0 +1,30 @@
+exporters:
+  otlp:
+    endpoint: https://gateway.otlp.hasura.io:443
+    headers:
+      Authorization: pat ${env:HASURA_DDN_PAT}
+
+  otlp/jaeger:
+    endpoint: jaeger:4317
+    tls:
+      insecure: true
+
+processors:
+  batch: {}
+receivers:
+  otlp:
+    protocols:
+      grpc:
+        endpoint: 0.0.0.0:4317
+      http:
+        endpoint: 0.0.0.0:4318
+service:
+  pipelines:
+    traces:
+      exporters:
+        - otlp
+        - otlp/jaeger
+      processors:
+        - batch
+      receivers:
+        - otlp
diff --git a/tests/engine/supergraph.yaml b/tests/engine/supergraph.yaml
new file mode 100644
index 0000000..0d9260e
--- /dev/null
+++ b/tests/engine/supergraph.yaml
@@ -0,0 +1,6 @@
+kind: Supergraph
+version: v2
+definition:
+  subgraphs:
+    - globals/subgraph.yaml
+    - app/subgraph.yaml
diff --git a/tests/minio/create-bucket.sh b/tests/minio/create-bucket.sh
new file mode 100755
index 0000000..bf45dfc
--- /dev/null
+++ b/tests/minio/create-bucket.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+until (/usr/bin/mc config host add minio "$STORAGE_ENDPOINT" "$ACCESS_KEY_ID" "$SECRET_ACCESS_KEY")
+do
+  echo '...waiting...'
+  sleep 1
+done
+
+/usr/bin/mc mb "minio/$DEFAULT_BUCKET"
+/usr/bin/mc policy set public "minio/$DEFAULT_BUCKET"
\ No newline at end of file
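
Note: tests/minio/create-bucket.sh can also be exercised by hand when debugging the test setup. A minimal sketch, assuming a MinIO server is already reachable; the endpoint, credentials, and bucket name below are illustrative values, not the defaults defined in this patch's .env. The script also assumes an mc release that still supports `mc policy` (newer releases renamed this subcommand to `mc anonymous`).

# Hypothetical manual run; all exported values are example placeholders.
export STORAGE_ENDPOINT=http://localhost:9000
export ACCESS_KEY_ID=test-access-key
export SECRET_ACCESS_KEY=test-secret-key
export DEFAULT_BUCKET=default
sh tests/minio/create-bucket.sh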
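
Once the supergraph build has been generated and the compose stack in tests/engine is up, the engine is published on port 3280 (mapped from container port 3000). A hedged smoke-test sketch: the root field name storageBucketExists mirrors the connector function in the schema above, but the exact GraphQL names depend on the DDN metadata generation, so treat this as illustrative rather than a guaranteed request shape.

# Field and argument names below are assumptions derived from the connector
# schema, not verified against the generated metadata.
curl -s http://localhost:3280/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query":"query { storageBucketExists(bucket: \"default\") }"}'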