Commit: testing deletes

williamlardier committed May 6, 2024
1 parent c118d12 commit 07c4373
Showing 9 changed files with 238 additions and 53 deletions.
87 changes: 83 additions & 4 deletions tests/ctst/common/common.ts
@@ -8,6 +8,35 @@ import { ActionPermissionsType } from 'steps/bucket-policies/utils';

setDefaultTimeout(Constants.DEFAULT_TIMEOUT);

/**
* @param {Zenko} this world object
* @param {string} objectName object name
* @returns {string} the object name based on the backend flakiness
*/
function getObjectNameWithBackendFlakiness(this: Zenko, objectName: string) {
let objectNameFinal;
const backendFlakinessRetryNumber = this.getSaved<string>('backendFlakinessRetryNumber');
const backendFlakiness = this.getSaved<string>('backendFlakiness');

if (!backendFlakiness || !backendFlakinessRetryNumber || !objectName) {
return objectName;
}

switch (backendFlakiness) {
case 'command':
objectNameFinal = `${objectName}.scal-retry-command-${backendFlakinessRetryNumber}`;
break;
case 'archive':
case 'restore':
objectNameFinal = `${objectName}.scal-retry-${backendFlakiness}-job-${backendFlakinessRetryNumber}`;
break;
default:
process.stdout.write(`Unknown backend flakiness ${backendFlakiness}\n`);
return objectName;
}
return objectNameFinal;
}
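
For illustration only — not part of this commit — a self-contained sketch of the naming scheme the helper above produces, using hypothetical inputs:

// Hypothetical re-statement of the suffixing rules; exampleRetryName is not a real helper in the test suite.
function exampleRetryName(objectName: string, op: 'command' | 'archive' | 'restore', retries: number): string {
    return op === 'command'
        ? `${objectName}.scal-retry-command-${retries}`
        : `${objectName}.scal-retry-${op}-job-${retries}`;
}
// exampleRetryName('my-object', 'archive', 3) === 'my-object.scal-retry-archive-job-3'
// exampleRetryName('my-object', 'command', 3) === 'my-object.scal-retry-command-3'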

async function getTopicsOffsets(topics: string[], kafkaAdmin: Admin) {
const offsets = [];
for (const topic of topics) {
@@ -90,8 +119,8 @@ Given('an object {string} that {string}', async function (this: Zenko, objectNam
}
});

When('the user tries to perform the current S3 action on the bucket {string} times with a {string} ms delay',
async function (this: Zenko, numberOfRuns: string, delay: string) {
When('the user tries to perform the current S3 action on the bucket {int} times with a {int} ms delay',
async function (this: Zenko, numberOfRuns: number, delay: number) {
this.setAuthMode('test_identity');
const action = {
...this.getSaved<ActionPermissionsType>('currentAction'),
@@ -104,7 +133,7 @@ When('the user tries to perform the current S3 action on the bucket {string} tim
action.action = action.action.replace('Version', '');
this.addToSaved('currentAction', action);
}
for (let i = 0; i < Number(numberOfRuns); i++) {
for (let i = 0; i < numberOfRuns; i++) {
// For repeated WRITE actions, we want to change the object name
if (action.action === 'PutObject') {
this.addToSaved('objectName', `objectrepeat-${Utils.randomString()}`);
Expand All @@ -116,7 +145,7 @@ When('the user tries to perform the current S3 action on the bucket {string} tim
// stop at any error, the error will be evaluated in a separated step
return;
}
await Utils.sleep(Number(delay));
await Utils.sleep(delay);
}
});

@@ -144,3 +173,53 @@ Then('the operation finished without error', function (this: Zenko) {
this.cleanupEntity();
assert.strictEqual(!!this.getResult().err, false);
});

When('i restore object {string} for {int} days', async function (this: Zenko, objectName: string, days: number) {
const objName = getObjectNameWithBackendFlakiness.call(this, objectName) || this.getSaved<string>('objectName');
this.resetCommand();
this.addCommandParameter({ bucket: this.getSaved<string>('bucketName') });
this.addCommandParameter({ key: objName });
const versionId = this.getSaved<Map<string, string>>('createdObjects')?.get(objName);
if (versionId) {
this.addCommandParameter({ versionId });
}
this.addCommandParameter({ restoreRequest: `Days=${days}` });
await S3.restoreObject(this.getCommandParameters());
});

Given('an upload size of {int} B for the object {string}', async function (
this: Zenko,
size: number,
objectName: string
) {
this.addToSaved('objectSize', size);
if (this.getSaved<boolean>('preExistingObject')) {
if (objectName) {
this.addToSaved('objectName', objectName);
} else {
this.addToSaved('objectName', `object-${Utils.randomString()}`);
}
await putObject(this, this.getSaved<string>('objectName'));
}
});

When('I PUT an object with size {int}', async function (this: Zenko, size: number) {
if (size > 0) {
this.addToSaved('objectSize', size);
}
this.addToSaved('objectName', `object-${Utils.randomString()}`);
const result = await putObject(this, this.getSaved<string>('objectName'));
this.setResult(result);
});

When('i delete object {string}', async function (this: Zenko, objectName: string) {
const objName = objectName || this.getSaved<string>('objectName');
this.resetCommand();
this.addCommandParameter({ bucket: this.getSaved<string>('bucketName') });
this.addCommandParameter({ key: objName });
const versionId = this.getSaved<Map<string, string>>('createdObjects')?.get(objName);
if (versionId) {
this.addCommandParameter({ versionId });
}
await S3.deleteObject(this.getCommandParameters());
});
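
As a reading aid — purely hypothetical values, not part of the diff — the 'i delete object' step above effectively assembles a parameter set like the following before calling S3.deleteObject:

// Hypothetical illustration of the merged command parameters; the real step
// resolves these from the Zenko world (getSaved) and getCommandParameters().
const exampleDeleteParams = {
    bucket: 'quota-bucket',   // this.getSaved<string>('bucketName')
    key: 'obj-1',             // objectName argument, or the saved 'objectName'
    versionId: 'vid-123',     // only added when 'createdObjects' holds a version for this key
};
// await S3.deleteObject(exampleDeleteParams);  // call shape is an assumption, kept commented
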
17 changes: 17 additions & 0 deletions tests/ctst/common/utils.ts
@@ -1,3 +1,4 @@
import { exec } from 'child_process';
import {
Utils,
} from 'cli-testing';
@@ -87,3 +88,19 @@ export const s3FunctionExtraParams: { [key: string]: Record<string, unknown>[] }
}),
}],
};

/**
* Executes a shell command and returns its output as a Promise.
* @param {string} cmd The command to execute
* @return {Promise<string>} the command output
*/
export function execShellCommand(cmd: string): Promise<string> {
return new Promise((resolve, reject) => {
exec(cmd, (error, stdout, stderr) => {
if (error) {
reject(error);
}
resolve(stdout || stderr);
});
});
}
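
For context — an illustrative usage sketch, not part of the commit — the new execShellCommand helper can be awaited like any Promise; the cleanDmfVolume helper added in tests/ctst/steps/dmf.ts below uses it the same way:

import { execShellCommand } from 'common/utils';

// Hypothetical example: list the DMF volume contents; a non-zero exit
// status from the shell surfaces as a rejected Promise.
async function listColdData(): Promise<string> {
    const output = await execShellCommand('ls -la /cold-data');
    process.stdout.write(`${output}\n`);
    return output;
}
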
120 changes: 92 additions & 28 deletions tests/ctst/features/quotas/Quotas.feature
@@ -2,26 +2,21 @@ Feature: Quota Management for APIs
This feature ensures that quotas are correctly set and honored
for different APIs.

# TODO:
# ONGOING - Authz of Quota APIs: only storage manager should be allowed to edit by default
# ONGOING - Quota evaluation: should work for all types of identities (roles, accounts, users)
# ONGOING - Inflights must be properly handled in a test pushing in 2 steps
# ONGOING - Same as above but should not wait, for Restore Object
@2.6.0
@PreMerge
@Quotas
@CronJob
@DataWrite
Scenario Outline: Quotas are evaluated during write operations
Given an action "<action>"
And an upload size of "<uploadSize>" B
And an upload size of <uploadSize> B for the object ""
And a "STORAGE_MANAGER" type
And a bucket quota set to "<bucketQuota>" B
And an account quota set to "<accountQuota>" B
And a bucket quota set to <bucketQuota> B
And an account quota set to <accountQuota> B
And a "<userType>" type
And an environment setup for the API
And an "existing" IAM Policy that "applies" with "ALLOW" effect for the current API
And the user tries to perform the current S3 action on the bucket "20" times with a "400" ms delay
When the user tries to perform the current S3 action on the bucket 20 times with a 400 ms delay
Then the API should "<result>" with "<expectedError>"

Examples:
@@ -62,29 +57,98 @@ Feature: Quota Management for APIs
@Quotas
@CronJob
@Restore
Scenario Outline: Object restoration implements strict quotas
Given an action "<action>"
And an upload size of "<uploadSize>" B
Scenario Outline: Object restoration (fake) implements strict quotas
Given an action "RestoreObject"
And an upload size of <uploadSize> B for the object ""
And a "STORAGE_MANAGER" type
And a bucket quota set to "<bucketQuota>" B
And an account quota set to "<accountQuota>" B
And a bucket quota set to <bucketQuota> B
And an account quota set to <accountQuota> B
And a "<userType>" type
And an environment setup for the API
And an "existing" IAM Policy that "applies" with "ALLOW" effect for the current API
And the user tries to perform the current S3 action on the bucket "1" times with a "0" ms delay
And the user tries to perform the current S3 action on the bucket 1 times with a 0 ms delay
Then the API should "<result>" with "<expectedError>"

Examples:
| action | uploadSize | bucketQuota | accountQuota | userType | result | expectedError |
| RestoreObject | 100 | 0 | 0 | ACCOUNT | succeed | |
| RestoreObject | 100 | 99 | 0 | ACCOUNT | fail | QuotaExceeded |
| RestoreObject | 100 | 0 | 99 | ACCOUNT | fail | QuotaExceeded |
| RestoreObject | 100 | 99 | 99 | ACCOUNT | fail | QuotaExceeded |
| RestoreObject | 100 | 101 | 101 | ACCOUNT | succeed | |
| RestoreObject | 100 | 0 | 0 | IAM_USER | succeed | |
| RestoreObject | 100 | 99 | 0 | IAM_USER | fail | QuotaExceeded |
| RestoreObject | 100 | 0 | 99 | IAM_USER | fail | QuotaExceeded |
| RestoreObject | 100 | 99 | 99 | IAM_USER | fail | QuotaExceeded |
| RestoreObject | 100 | 101 | 101 | IAM_USER | succeed | |
# TODO test with real restores
# TODO test the deletion
| uploadSize | bucketQuota | accountQuota | userType | result | expectedError |
| 100 | 0 | 0 | ACCOUNT | succeed | |
| 100 | 99 | 0 | ACCOUNT | fail | QuotaExceeded |
| 100 | 0 | 99 | ACCOUNT | fail | QuotaExceeded |
| 100 | 99 | 99 | ACCOUNT | fail | QuotaExceeded |
| 100 | 101 | 101 | ACCOUNT | succeed | |
| 100 | 0 | 0 | IAM_USER | succeed | |
| 100 | 99 | 0 | IAM_USER | fail | QuotaExceeded |
| 100 | 0 | 99 | IAM_USER | fail | QuotaExceeded |
| 100 | 99 | 99 | IAM_USER | fail | QuotaExceeded |
| 100 | 101 | 101 | IAM_USER | succeed | |

@2.6.0
@PreMerge
@Quotas
@CronJob
@DataDeletion
@NonVersioned
Scenario Outline: Quotas are affected by deletion operations
Given an action "DeleteObject"
# First set a big quota to enable the inflights
# and ensure the initial PUT is accepted
And a "STORAGE_MANAGER" type
And a bucket quota set to 10000 B
And an account quota set to 10000 B
# Put an object to fill the quota directly
And an upload size of 1000 B for the object "obj-1"
# Set a small quota
And a bucket quota set to <bucketQuota> B
And an account quota set to <accountQuota> B
And a "<userType>" type
And an environment setup for the API
And an "existing" IAM Policy that "applies" with "ALLOW" effect for the current API
When I PUT an object with size <uploadSize>
Then the API should "fail" with "QuotaExceeded"
When i delete object "obj-1"
And I wait 3 seconds
And I PUT an object with size <uploadSize>
Then the API should "succeed" with ""

Examples:
| uploadSize | bucketQuota | accountQuota | userType |
| 100 | 99 | 0 | ACCOUNT |
| 100 | 0 | 99 | ACCOUNT |
| 100 | 99 | 99 | ACCOUNT |
| 100 | 99 | 0 | IAM_USER |
| 100 | 0 | 99 | IAM_USER |
| 100 | 99 | 99 | IAM_USER |
# @2.6.0
# @PreMerge
# @Quotas
# @Restore
# @Dmf
# @ColdStorage
# Scenario Outline: Object restoration implements strict quotas
# Given an action "<action>"
# And a flaky backend that will require <retryNumber> retries for "restore"
# And an upload size of "<uploadSize>" B
# And a "STORAGE_MANAGER" type
# And a bucket quota set to "<bucketQuota>" B
# And an account quota set to "<accountQuota>" B
# And a "<userType>" type
# And an environment setup for the API
# And an "existing" IAM Policy that "applies" with "ALLOW" effect for the current API
# And a transition workflow to "e2e-cold" location
# When i restore object "" for 5 days
# Then the API should "<result>" with "<expectedError>"

# Examples:
# | action | uploadSize | bucketQuota | accountQuota | userType | result | expectedError | retryNumber |
# | RestoreObject | 100 | 0 | 0 | ACCOUNT | succeed | | 3 |
# | RestoreObject | 100 | 99 | 0 | ACCOUNT | fail | QuotaExceeded | 3 |
# | RestoreObject | 100 | 0 | 99 | ACCOUNT | fail | QuotaExceeded | 3 |
# | RestoreObject | 100 | 99 | 99 | ACCOUNT | fail | QuotaExceeded | 3 |
# | RestoreObject | 100 | 101 | 101 | ACCOUNT | succeed | | 3 |
# | RestoreObject | 100 | 0 | 0 | IAM_USER | succeed | | 3 |
# | RestoreObject | 100 | 99 | 0 | IAM_USER | fail | QuotaExceeded | 3 |
# | RestoreObject | 100 | 0 | 99 | IAM_USER | fail | QuotaExceeded | 3 |
# | RestoreObject | 100 | 99 | 99 | IAM_USER | fail | QuotaExceeded | 3 |
# | RestoreObject | 100 | 101 | 101 | IAM_USER | succeed | | 3 |
# TODO test with real restores
# TODO test the deletion
2 changes: 1 addition & 1 deletion tests/ctst/package.json
@@ -23,7 +23,7 @@
"@typescript-eslint/eslint-plugin": "^5.45.0",
"@typescript-eslint/parser": "^5.45.0",
"babel-jest": "^29.3.1",
"cli-testing": "github:scality/cli-testing.git#cc24312c636a50059295d36ad12ab587b96fbcda",
"cli-testing": "github:scality/cli-testing.git#7fa94cea9369944e53cc2db454d40a103abf28c5",
"eslint": "^8.28.0"
},
"scripts": {
8 changes: 0 additions & 8 deletions tests/ctst/steps/bucket-policies/common.ts
@@ -46,14 +46,6 @@ Given('an action {string}', function (this: Zenko, apiName: string) {
}
});

Given('an upload size of {string} B', async function (this: Zenko, size: string) {
this.addToSaved('objectSize', parseInt(size, 10));
if (this.getSaved<boolean>('preExistingObject')) {
this.addToSaved('objectName', `objectforbptests-${Utils.randomString()}`);
await putObject(this, this.getSaved<string>('objectName'));
}
});

Given('an existing bucket prepared for the action', async function (this: Zenko) {
await createBucketWithConfiguration(this,
this.getSaved<string>('bucketName'),
24 changes: 24 additions & 0 deletions tests/ctst/steps/dmf.ts
@@ -0,0 +1,24 @@
import { Given, setDefaultTimeout, After } from '@cucumber/cucumber';
import assert from 'assert';
import { Constants } from 'cli-testing';
import { execShellCommand } from 'common/utils';
import Zenko from 'world/Zenko';

setDefaultTimeout(Constants.DEFAULT_TIMEOUT);

async function cleanDmfVolume() {
await execShellCommand('rm -rf /cold-data/*');
}

Given('a flaky backend that will require {int} retries for {string}',
function (this: Zenko, retryNumber: number, op: string) {
assert(['restore', 'archive', 'command'].includes(op), `Invalid operation ${op}`);
assert(retryNumber > 0, `Invalid retry number ${retryNumber}`);

this.addToSaved('backendFlakinessRetryNumber', retryNumber);
this.addToSaved('backendFlakiness', op);
});

After({ tags: '@Dmf' }, async () => {
await cleanDmfVolume();
});