diff --git a/.github/scripts/end2end/configs/zenko.yaml b/.github/scripts/end2end/configs/zenko.yaml index ad1b1fd7cd..00aae2f74c 100644 --- a/.github/scripts/end2end/configs/zenko.yaml +++ b/.github/scripts/end2end/configs/zenko.yaml @@ -121,6 +121,9 @@ spec: azure: archiveTier: "hot" restoreTimeout: "15s" + scuba: + logging: + logLevel: debug ingress: workloadPlaneClass: 'nginx' controlPlaneClass: 'nginx-control-plane' diff --git a/tests/ctst/common/hooks.ts b/tests/ctst/common/hooks.ts index 35c54c3a55..44070b2211 100644 --- a/tests/ctst/common/hooks.ts +++ b/tests/ctst/common/hooks.ts @@ -18,7 +18,7 @@ import { process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; const { atMostOnePicklePerTag } = parallelCanAssignHelpers; -const noParallelRun = atMostOnePicklePerTag(['@AfterAll', '@PRA', '@ColdStorage']); +const noParallelRun = atMostOnePicklePerTag(['@AfterAll', '@PRA', '@ColdStorage', '@Utilization']); setParallelCanAssign(noParallelRun); @@ -55,7 +55,13 @@ After(async function (this: Zenko, results) { ); }); -After({ tags: '@Quotas' }, async function () { +After({ tags: '@Quotas' }, async function (this: Zenko, results) { + if (results.result?.status === 'FAILED') { + this.logger.warn('quota was not cleaned for test', { + bucket: this.getSaved('bucketName'), + }); + return; + } await teardownQuotaScenarios(this as Zenko); }); diff --git a/tests/ctst/features/quotas/CountItems.feature b/tests/ctst/features/CountItems/CountItems.feature similarity index 100% rename from tests/ctst/features/quotas/CountItems.feature rename to tests/ctst/features/CountItems/CountItems.feature diff --git a/tests/ctst/features/quotas/Quotas.feature b/tests/ctst/features/quotas/Quotas.feature index ec111faa51..020f58ec31 100644 --- a/tests/ctst/features/quotas/Quotas.feature +++ b/tests/ctst/features/quotas/Quotas.feature @@ -72,11 +72,9 @@ Feature: Quota Management for APIs Given an action "DeleteObject" And a permission to perform the "PutObject" action And a STORAGE_MANAGER type - 
And a bucket quota set to 10000 B - And an account quota set to 10000 B - And an upload size of 1000 B for the object "obj-1" And a bucket quota set to <bucketQuota> B And an account quota set to <accountQuota> B + And an upload size of 200 B for the object "obj-1" And a <userType> type And an environment setup for the API And an "existing" IAM Policy that "applies" with "ALLOW" effect for the current API @@ -97,6 +95,80 @@ Feature: Quota Management for APIs | 100 | 0 | 200 | IAM_USER | | 100 | 200 | 200 | IAM_USER | + @2.6.0 + @PreMerge + @Quotas + @CronJob + @DataDeletion + @NonVersioned + Scenario Outline: Quotas are affected by deletion operations between count items runs + Given an action "DeleteObject" + And a permission to perform the "PutObject" action + And a STORAGE_MANAGER type + And a bucket quota set to 1000 B + And an account quota set to 1000 B + And an upload size of 1000 B for the object "obj-1" + And a bucket quota set to <bucketQuota> B + And an account quota set to <accountQuota> B + And a <userType> type + And an environment setup for the API + And an "existing" IAM Policy that "applies" with "ALLOW" effect for the current API + When I wait 3 seconds + And I PUT an object with size <uploadSize> + Then the API should "fail" with "QuotaExceeded" + When the "count-items" cronjobs completes without error + # Wait for inflights to be read by SCUBA + When I wait 3 seconds + # At this point if negative inflights are not supported, write should + # not be possible, as the previous inflights are now part of the current + # metrics. 
+ And i delete object "obj-1" + # Wait for inflights to be read by SCUBA + And I wait 3 seconds + And I PUT an object with size <uploadSize> + Then the API should "succeed" with "" + + Examples: + | uploadSize | bucketQuota | accountQuota | userType | + | 100 | 200 | 0 | ACCOUNT | + + @2.6.0 + @PreMerge + @Quotas + @CronJob + @DataDeletion + @NonVersioned + Scenario Outline: Negative inflights do not allow to bypass the quota + Given an action "DeleteObject" + And a permission to perform the "PutObject" action + And a STORAGE_MANAGER type + And a bucket quota set to 1000 B + And an account quota set to 1000 B + And an upload size of 1000 B for the object "obj-1" + And a bucket quota set to <bucketQuota> B + And an account quota set to <accountQuota> B + And a <userType> type + And an environment setup for the API + And an "existing" IAM Policy that "applies" with "ALLOW" effect for the current API + When I wait 3 seconds + And I PUT an object with size <uploadSize> + Then the API should "fail" with "QuotaExceeded" + When the "count-items" cronjobs completes without error + # Wait for inflights to be read by SCUBA + When I wait 3 seconds + # At this point if negative inflights are not supported, write should + # not be possible, as the previous inflights are now part of the current + # metrics. 
+ And i delete object "obj-1" + # Wait for inflights to be read by SCUBA + And I wait 3 seconds + And I PUT an object with size 1000 + Then the API should "fail" with "QuotaExceeded" + + Examples: + | uploadSize | bucketQuota | accountQuota | userType | + | 200 | 200 | 0 | ACCOUNT | + @2.6.0 @PreMerge @Quotas @@ -129,3 +201,34 @@ Feature: Quota Management for APIs | RestoreObject | 100 | 0 | 99 | IAM_USER | fail | QuotaExceeded | 3 | | RestoreObject | 100 | 99 | 99 | IAM_USER | fail | QuotaExceeded | 3 | | RestoreObject | 100 | 101 | 101 | IAM_USER | succeed | | 3 | + + @2.6.0 + @PreMerge + @Quotas + @Restore + @Dmf + @ColdStorage + @Only + Scenario Outline: Restored object expiration updates quotas + Given an action "<action>" + And a STORAGE_MANAGER type + And a transition workflow to "e2e-cold" location + And an upload size of <uploadSize> B for the object "obj-1" + Then object "obj-1" should be "transitioned" and have the storage class "e2e-cold" + Given a bucket quota set to <bucketQuota> B + And an account quota set to <accountQuota> B + And a <userType> type + And an environment setup for the API + And an "existing" IAM Policy that "applies" with "ALLOW" effect for the current API + When i restore object "" for 5 days + Then the API should "succeed" with "" + And object "obj-1" should be "restored" and have the storage class "e2e-cold" + Given a STORAGE_MANAGER type + Then object "obj-1" should expire in 5 days + When i wait for 5 days + Then object "obj-1" should be "cold" and have the storage class "e2e-cold" + + Examples: + | action | uploadSize | bucketQuota | accountQuota | userType | + | RestoreObject | 100 | 0 | 0 | ACCOUNT | + | RestoreObject | 100 | 101 | 101 | ACCOUNT | \ No newline at end of file diff --git a/tests/ctst/steps/quotas/quotas.ts b/tests/ctst/steps/quotas/quotas.ts index cd7ce1b022..850119f908 100644 --- a/tests/ctst/steps/quotas/quotas.ts +++ b/tests/ctst/steps/quotas/quotas.ts @@ -6,6 +6,7 @@ import { Scality, Command, Utils, AWSCredentials, Constants, Identity, IdentityE import { 
createJobAndWaitForCompletion } from '../utils/kubernetes'; import { createBucketWithConfiguration, putObject } from '../utils/utils'; import { hashStringAndKeepFirst20Characters } from 'common/utils'; +import assert from 'assert'; export async function prepareQuotaScenarios(world: Zenko, scenarioConfiguration: ITestCaseHookParameter) { /** @@ -136,6 +137,16 @@ Given('a bucket quota set to {int} B', async function (this: Zenko, quota: numbe result, }); + // Ensure the quota is set + const resultGet: Command = await Scality.getBucketQuota( + this.parameters, + this.getCommandParameters()); + this.logger.debug('GetBucketQuota result', { + resultGet, + }); + + assert(resultGet.stdout.includes(`${quota}`)); + if (result.err) { throw new Error(result.err); } @@ -158,6 +169,9 @@ Given('an account quota set to {int} B', async function (this: Zenko, quota: num result, }); + // Ensure the quota is set + assert(JSON.parse(result.stdout).quota === quota); + if (result.err) { throw new Error(result.err); } diff --git a/tests/ctst/steps/utils/kubernetes.ts b/tests/ctst/steps/utils/kubernetes.ts index 32e9aceb91..636353c50b 100644 --- a/tests/ctst/steps/utils/kubernetes.ts +++ b/tests/ctst/steps/utils/kubernetes.ts @@ -1,3 +1,6 @@ +import fs from 'fs'; +import * as path from 'path'; +import lockFile from 'proper-lockfile'; import { KubernetesHelper, Utils } from 'cli-testing'; import Zenko from 'world/Zenko'; import { @@ -71,12 +74,39 @@ export function createKubeCustomObjectClient(world: Zenko): CustomObjectsApi { return KubernetesHelper.customObject; } -export async function createJobAndWaitForCompletion(world: Zenko, jobName: string, customMetadata?: string) { +export async function createJobAndWaitForCompletion( + world: Zenko, + jobName: string, + customMetadata?: string +) { const batchClient = createKubeBatchClient(world); const watchClient = createKubeWatchClient(world); + + const lockFilePath = path.join('/tmp', `${jobName}.lock`); + let releaseLock: (() => Promise) | 
false = false; + + if (!fs.existsSync(lockFilePath)) { + fs.writeFileSync(lockFilePath, ''); + } + try { + releaseLock = await lockFile.lock(lockFilePath, { + // Expect the jobs in the queue does not take more than 5 minutes to complete + stale: 10 * 60 * 1000, + // use a linear backoff strategy + retries: { + retries: 610, + factor: 1, + minTimeout: 1000, + maxTimeout: 1000, + }, + }); + world.logger.debug(`Acquired lock for job: ${jobName}`); + + // Read the cron job and prepare the job spec const cronJob = await batchClient.readNamespacedCronJob(jobName, 'default'); const cronJobSpec = cronJob.body.spec?.jobTemplate.spec; + const job = new V1Job(); const metadata = new V1ObjectMeta(); job.apiVersion = 'batch/v1'; @@ -87,50 +117,57 @@ export async function createJobAndWaitForCompletion(world: Zenko, jobName: strin 'cronjob.kubernetes.io/instantiate': 'ctst', }; if (customMetadata) { - metadata.annotations = { - custom: customMetadata, - }; + metadata.annotations.custom = customMetadata; } job.metadata = metadata; + // Create the job const response = await batchClient.createNamespacedJob('default', job); - world.logger.debug('job created', { - job: response.body.metadata, - }); + world.logger.debug('Job created', { job: response.body.metadata }); const expectedJobName = response.body.metadata?.name; + // Watch for job completion await new Promise((resolve, reject) => { void watchClient.watch( '/apis/batch/v1/namespaces/default/jobs', {}, (type: string, apiObj, watchObj) => { - if (job.metadata?.name && expectedJobName && - (watchObj.object?.metadata?.name as string)?.startsWith?.(expectedJobName)) { + if ( + expectedJobName && + (watchObj.object?.metadata?.name as string)?.startsWith?.(expectedJobName) + ) { if (watchObj.object?.status?.succeeded) { - world.logger.debug('job succeeded', { - job: job.metadata, - }); + world.logger.debug('Job succeeded', { job: job.metadata }); resolve(); } else if (watchObj.object?.status?.failed) { - world.logger.debug('job 
failed', { + world.logger.debug('Job failed', { job: job.metadata, object: watchObj.object, }); - reject(new Error('job failed')); + reject(new Error('Job failed')); } } - }, reject); + }, + reject + ); }); } catch (err: unknown) { - world.logger.error('error creating job', { + world.logger.error('Error creating or waiting for job completion', { jobName, err, }); throw err; + } finally { + // Ensure the lock is released + if (releaseLock) { + await releaseLock(); + world.logger.debug(`Released lock for job: ${jobName}`); + } } } + export async function waitForZenkoToStabilize( world: Zenko, needsReconciliation = false, timeout = 15 * 60 * 1000, namespace = 'default') { // ZKOP pulls the overlay configuration from Pensieve every 5 seconds diff --git a/tests/ctst/world/Zenko.ts b/tests/ctst/world/Zenko.ts index 12d94e94fe..44881b5f7d 100644 --- a/tests/ctst/world/Zenko.ts +++ b/tests/ctst/world/Zenko.ts @@ -4,6 +4,8 @@ import { AccessKey } from '@aws-sdk/client-iam'; import { Credentials } from '@aws-sdk/client-sts'; import { aws4Interceptor } from 'aws4-axios'; import qs from 'qs'; +import fs from 'fs'; +import lockFile from 'proper-lockfile'; import Werelogs from 'werelogs'; import { CacheHelper, @@ -633,24 +635,36 @@ export default class Zenko extends World { if (!Identity.hasIdentity(IdentityEnum.ACCOUNT, accountName)) { Identity.useIdentity(IdentityEnum.ADMIN, site.adminIdentityName); - + const filePath = `/tmp/account-init-${accountName}.json`; + if (!fs.existsSync(filePath)) { + fs.writeFileSync(filePath, JSON.stringify({ + ready: false, + })); + } let account = null; - CacheHelper.logger.debug('Creating account', { - accountName, - adminIdentityName: site.adminIdentityName, - credentials: Identity.getCurrentCredentials(), - }); - // Create the account if already exist will not throw any error + let releaseLock: (() => Promise) | null = null; try { - await SuperAdmin.createAccount({ accountName }); - /* eslint-disable */ - } catch (err: any) { - 
CacheHelper.logger.debug('Error while creating account', { - accountName, - err, + releaseLock = await lockFile.lock(filePath, { + stale: Constants.DEFAULT_TIMEOUT / 2, + retries: { + retries: 5, + factor: 3, + minTimeout: 1000, + maxTimeout: 5000, + } }); - if (!err.EntityAlreadyExists && err.code !== 'EntityAlreadyExists') { - throw err; + + try { + await SuperAdmin.createAccount({ accountName }); + /* eslint-disable */ + } catch (err: any) { + if (!err.EntityAlreadyExists && err.code !== 'EntityAlreadyExists') { + throw err; + } + } + } finally { + if (releaseLock) { + await releaseLock(); } } /* eslint-enable */ @@ -693,7 +707,7 @@ export default class Zenko extends World { const accountName = this.sites['source']?.accountName || CacheHelper.parameters.AccountName!; const accountAccessKeys = Identity.getCredentialsForIdentity( IdentityEnum.ACCOUNT, this.sites['source']?.accountName - || CacheHelper.parameters.AccountName!) || { + || CacheHelper.parameters.AccountName!) || { accessKeyId: '', secretAccessKey: '', }; @@ -865,7 +879,7 @@ export default class Zenko extends World { } async awsS3Request(method: Method, path: string, - userCredentials: AWSCredentials, headers: object = {}, payload: object = {}) : Promise { + userCredentials: AWSCredentials, headers: object = {}, payload: object = {}): Promise { const interceptor = aws4Interceptor({ options: { region: 'us-east-1', @@ -891,7 +905,7 @@ export default class Zenko extends World { statusCode: response.status, data: response.data as unknown, }; - /* eslint-disable */ + /* eslint-disable */ } catch (err: any) { return { stdout: '', @@ -967,7 +981,7 @@ export default class Zenko extends World { } } - async addWebsiteEndpoint(this: Zenko, endpoint: string) : + async addWebsiteEndpoint(this: Zenko, endpoint: string): Promise<{ statusCode: number; data: object } | { statusCode: number; err: unknown }> { return await this.managementAPIRequest('POST', `/config/${this.parameters.InstanceID}/website/endpoint`, @@ 
-977,7 +991,7 @@ export default class Zenko extends World { `"${endpoint}"`); } - async deleteLocation(this: Zenko, locationName: string) : + async deleteLocation(this: Zenko, locationName: string): Promise<{ statusCode: number; data: object } | { statusCode: number; err: unknown }> { return await this.managementAPIRequest('DELETE', `/config/${this.parameters.InstanceID}/location/${locationName}`);