Skip to content

Commit

Permalink
FT/addClueso
Browse files Browse the repository at this point in the history
  • Loading branch information
Cloud User authored and LaurenSpiegel committed Jan 16, 2018
1 parent 92f8dc8 commit 32f3135
Show file tree
Hide file tree
Showing 9 changed files with 396 additions and 8 deletions.
1 change: 1 addition & 0 deletions .eslintrc
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{ "extends": "scality" }
6 changes: 6 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Dependency directory
node_modules

# Logs
logs
*.log
30 changes: 30 additions & 0 deletions package.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
{
"name": "zenko",
"version": "1.0.0",
"directories": {
"doc": "docs",
"test": "tests"
},
"scripts": {
"test": "cd tests/ && mocha -t 90000 clueso-tests/",
"lint": "eslint $(git ls-files '*.js')"
},
"repository": {
"type": "git",
"url": "git+https://github.com/scality/Zenko.git"
},
"author": "",
"license": "",
"bugs": {
"url": "https://github.com/scality/Zenko/issues"
},
"homepage": "https://github.com/scality/Zenko#readme",
"devDependencies": {
"async": "^2.6.0",
"aws-sdk": "^2.169.0",
"eslint": "2.13.1",
"eslint-config-airbnb": "6.2.0",
"eslint-config-scality": "scality/Guidelines",
"mocha": "^4.0.1"
}
}
29 changes: 27 additions & 2 deletions swarm-production/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ $ docker stack deploy -c docker-stack.yml zenko-prod

### Access and Secret Keys

SKIP THIS STEP IF YOU ARE USING ZENKO ORBIT
SKIP THIS STEP IF YOU ARE USING ZENKO ORBIT

The default access and secret key pair is `deployment-specific-access-key` /
`deployment-specific-secret-key`. Changing them is a must, and can be done by
Expand All @@ -88,7 +88,7 @@ variables in the `secrets.txt` file.

### Endpoint Name

SKIP THIS STEP IF YOU ARE USING ZENKO ORBIT
SKIP THIS STEP IF YOU ARE USING ZENKO ORBIT

By default the endpoint name is `zenko`, you may change this to the host name
presented to your clients (for example `s3.mydomain.com`) by exporting the
Expand Down Expand Up @@ -139,6 +139,11 @@ Go to [Zenko Orbit](https://www.zenko.io/admin) to manage your deployment throug

## Testing

To use the `tests` folder, update the credentials in `Zenko/tests/utils/s3SDK.js`
with credentials generated in Zenko Orbit.
Install node modules with `npm install`.
Then, simply run `npm test`.

Using [awscli](https://aws.amazon.com/cli/), we can perform S3 operations
on our Zenko stack. Since the load balancer container is deployed in `global`
mode, we can use any of the swarm nodes as the endpoint.
Expand All @@ -162,6 +167,26 @@ $ aws s3 --endpoint http://zenko ls s3://bucket1
2017-06-20 00:12:53 5052 README.md
```

### Clueso Search
Clueso search can be tested from within the S3-frontend container.

First, from your machine (not within the S3 Docker), create some objects:

```shell
$ aws s3api put-object --bucket bucket1 --key findme1 --endpoint-url http://127.0.0.1 --metadata "color=blue"
$ aws s3api put-object --bucket bucket1 --key leaveMeAlone2 --endpoint-url http://127.0.0.1 --metadata "color=red"
$ aws s3api put-object --bucket bucket1 --key findme2 --endpoint-url http://127.0.0.1 --metadata "color=blue"
```

From within the S3-frontend container:

```shell
$ bin/search_bucket.js -a accessKey1 -k verySecretKey1 -b bucket1 -q "userMd.\`x-amz-meta-color\`=\"blue\"" -h 127.0.0.1 -p 8000
```

You can see the Spark Master UI at port 8080.
Check out the Livy UI at port 8998.

## Further improvements

* Allow using an external environment vars file
Expand Down
84 changes: 78 additions & 6 deletions swarm-production/docker-stack.yml
Original file line number Diff line number Diff line change
@@ -1,10 +1,8 @@
---

version: "3.4"

services:
s3-data:
image: zenko/cloudserver:pensieve-0
image: zenko/cloudserver:pensieve-1
ports:
- "9992"
networks:
Expand All @@ -21,7 +19,7 @@ services:
- node.labels.io.zenko.type == storage

s3-metadata:
image: zenko/cloudserver:pensieve-0
image: zenko/cloudserver:pensieve-1
ports:
- "9993"
networks:
Expand All @@ -39,7 +37,7 @@ services:
- node.labels.io.zenko.type == storage

s3-front:
image: zenko/cloudserver:pensieve-0
image: zenko/cloudserver:pensieve-1
ports:
- "8001"
networks:
Expand All @@ -52,7 +50,6 @@ services:
METADATA_HOST: s3-metadata
REDIS_HOST: cache
ENDPOINT: "${ENDPOINT:-zenko}"
MANAGEMENT_ENDPOINT: 'https://api.zenko.io'
REMOTE_MANAGEMENT_DISABLE: "${REMOTE_MANAGEMENT_DISABLE:-0}"
secrets:
- s3-credentials
Expand Down Expand Up @@ -94,6 +91,79 @@ services:
delay: "10s"
monitor: "5s"

livy:
image: scality/clueso-livy:pensieve
hostname: livy
ports:
- "8998:8998"
# so that each spawned session can have an app ui, open range
- "4040-4049:4040-4049"
depends_on:
- spark-master
networks:
- backend
# need so can contact lb to get parquet files on search
- frontend-dmz

spark-master:
image: scality/clueso-spark-master:pensieve
hostname: spark-master
depends_on:
- queue
- graphite
- lb
ports:
# master ui:
- "8080:8080"
# ingestion pipeline app ui:
- "4050:4050"
# landing populator app ui:
- "4051:4051"
# storage info tool app ui
- "4052:4052"
# compactor tool app ui
- "4053:4053"
volumes:
- "heapdumps:/clueso/heapdumps:rw"
secrets:
- s3-credentials
networks:
- backend
# need so can contact lb to create METADATA bucket
- frontend-dmz

spark-worker:
image: scality/clueso-spark-worker:pensieve
depends_on:
- spark-master
environment:
SPARK_MASTER_HOST: spark://spark-master:7077
CLUSTER_DNS: spark-worker
INIT_REPLICATE: 1
networks:
- backend
# need so can contact lb to get parquet files on search
- frontend-dmz
deploy:
replicas: 1

graphite:
image: scality/clueso-grafana-graphite
depends_on:
- spark-master
ports:
- '8005:80'
- '8081:81'
- '3000:3000'
networks:
- backend
volumes:
- "grafana-data:/opt/grafana/data:rw"
deploy:
placement:
constraints:
- node.labels.io.zenko.type == storage

queue:
image: 'wurstmeister/kafka:1.0.0'
environment:
Expand Down Expand Up @@ -188,9 +258,11 @@ networks:
volumes:
s3-data:
s3-metadata:
grafana-data:
queue-journal:
quorum-data:
quorum-datalog:
heapdumps:

secrets:
s3-credentials:
Expand Down
102 changes: 102 additions & 0 deletions tests/clueso-tests/basicSearch.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
const s3Client = require('../utils/s3SDK');
const runAndCheckSearch = require('../utils/helpers').runAndCheckSearch;

const objectKey = 'findMe';
const hiddenKey = 'leaveMeAlone';
const userMetadata = { food: 'pizza' };
const updatedUserMetadata = { food: 'cake' };


// Integration suite: exercises Clueso metadata search end-to-end against a
// live Zenko/S3 endpoint. Seeds a bucket with one object carrying user
// metadata and one without, then searches by system metadata (`key`) and by
// user metadata (`x-amz-meta-*`). The long setTimeouts wait for the
// asynchronous ingestion pipeline to index the writes before searching.
describe('Basic search', () => {
    // Timestamp suffix keeps the bucket name unique across test runs.
    const bucketName = `basicsearchmebucket${Date.now()}`;

    // Create the bucket and seed two objects: `objectKey` with user
    // metadata (expected to match searches) and `hiddenKey` with none
    // (expected to be excluded from metadata-search results).
    before(done => {
        s3Client.createBucket({ Bucket: bucketName }, err => {
            if (err) {
                return done(err);
            }
            return s3Client.putObject({ Bucket: bucketName, Key: objectKey,
                Metadata: userMetadata }, err => {
                if (err) {
                    return done(err);
                }
                return s3Client.putObject({ Bucket: bucketName,
                    Key: hiddenKey },
                    err => {
                        // give ingestion pipeline some time
                        setTimeout(() => done(err), 45000);
                    });
            });
        });
    });

    // Remove both seeded objects, then the bucket itself.
    after(done => {
        s3Client.deleteObjects({ Bucket: bucketName, Delete: { Objects: [
            { Key: objectKey },
            { Key: hiddenKey }],
        } },
        err => {
            if (err) {
                return done(err);
            }
            return s3Client.deleteBucket({ Bucket: bucketName }, done);
        });
    });

    // System-metadata search: match on the object key itself.
    it('should list object with searched for system metadata', done => {
        const encodedSearch = encodeURIComponent(`key="${objectKey}"`);
        return runAndCheckSearch(s3Client, bucketName,
            encodedSearch, objectKey, done);
    });

    // User-metadata search: match on the x-amz-meta-food value set in
    // before(); only `objectKey` should be returned.
    it('should list object with searched for user metadata', done => {
        const encodedSearch =
            encodeURIComponent('userMd.\`x-amz-meta-food\`' +
            `="${userMetadata.food}"`);
        return runAndCheckSearch(s3Client, bucketName, encodedSearch,
            objectKey, done);
    });

    // Negative case: a metadata value no object has must yield an empty
    // listing (expected key of null means "expect no match").
    it('should return empty listing when no object has user md', done => {
        const encodedSearch =
            encodeURIComponent('userMd.\`x-amz-meta-food\`="nosuchfood"');
        return runAndCheckSearch(s3Client, bucketName,
            encodedSearch, null, done);
    });

    // Overwrite path: re-put the same key with new user metadata and
    // verify the search index reflects the updated value.
    describe('search when overwrite object', () => {
        before(done => {
            s3Client.putObject({ Bucket: bucketName, Key: objectKey,
                Metadata: updatedUserMetadata }, err => {
                // give ingestion pipeline some time and make sure
                // cache expires (60 second cache expiry)
                setTimeout(() => done(err), 75000);
            });
        });

        it('should list object with searched for updated user metadata',
            done => {
                const encodedSearch =
                    encodeURIComponent('userMd.\`x-amz-meta-food\`' +
                    `="${updatedUserMetadata.food}"`);
                return runAndCheckSearch(s3Client, bucketName, encodedSearch,
                    objectKey, done);
            });
    });
});

// Edge-case suite: searching an empty (freshly created) bucket must return
// an empty listing rather than an error. Reuses the module-level
// `objectKey` purely as a search term — no object by that name exists here.
describe('Search when no objects in bucket', () => {
    // Timestamp suffix keeps the bucket name unique across test runs.
    const bucketName = `noobjectbucket${Date.now()}`;

    before(done => {
        s3Client.createBucket({ Bucket: bucketName }, done);
    });

    after(done => {
        s3Client.deleteBucket({ Bucket: bucketName }, done);
    });

    // Expected key of null means "expect no results" in runAndCheckSearch.
    it('should return empty listing when no objects in bucket', done => {
        const encodedSearch = encodeURIComponent(`key="${objectKey}"`);
        return runAndCheckSearch(s3Client, bucketName,
            encodedSearch, null, done);
    });
});
Loading

0 comments on commit 32f3135

Please sign in to comment.