test don't merge #1443

Closed · wants to merge 11 commits
6 changes: 5 additions & 1 deletion biome.json
@@ -19,7 +19,11 @@
"**/*.jsonc",
"**/*.css"
],
"ignore": ["./packages/detector/src/index.js", "*.bundle.js"]
"ignore": [
"./packages/detector/src/index.js",
"*.bundle.js",
"./packages/provider/src/tasks/detection/decodePayload.js"
]
},
"linter": {
"rules": {
2 changes: 1 addition & 1 deletion dev/config/src/webpack/webpack.config.ts
@@ -25,7 +25,7 @@ export default (mode: string) => {

return {
resolve: {
extensions: [".js", ".jsx", ".ts", ".tsx"],
extensions: [".js", ".jsx", ".ts", ".tsx", ".cjs"],
extensionAlias: {
".js": [".js", ".jsx", ".ts", ".tsx"],
},
2 changes: 1 addition & 1 deletion dev/scripts/src/setup/dapp.ts
@@ -22,5 +22,5 @@ export async function registerSiteKey(
const logger = env.logger;
const tasks = new Tasks(env);
logger.info(" - siteKeyRegister");
await tasks.clientTaskManager.registerSiteKey(siteKey as string);
await tasks.clientTaskManager.registerSiteKey(siteKey as string, {});
}
53 changes: 48 additions & 5 deletions docker/docker-compose.provider.yml
@@ -17,13 +17,21 @@ services:
options:
max-size: '100m'
max-file: '1'
healthcheck:
test: ["CMD", "curl", "--fail", "localhost:9229/v1/prosopo/provider/status"] # ping the status api
interval: 30s
retries: 3
start_period: 30s
timeout: 10s
provider:
profiles:
- production
- staging
image: prosopo/provider:${COMPOSE_PROVIDER_IMAGE_VERSION}
labels:
- "com.centurylinklabs.watchtower.enable=true" # only services with this tag will be updated by watchtower
- "vector.provider=true" # enable logging as a provider
- "vector.docker=true" # log docker events
restart: unless-stopped # unless the container has been stopped, it will be restarted, even on reboot
pull_policy: always
env_file:
@@ -39,8 +39,8 @@ services:
max-size: '100m'
max-file: '1'
healthcheck:
test: ["CMD", "curl", "--fail", "localhost:9229/v1/prosopo/provider/status"] # ping the status api
interval: 30s
test: ["CMD", "curl", "--fail", "localhost:9229/v1/prosopo/provider/details"] # ping the details endpoint
interval: 5m
retries: 3
start_period: 30s
timeout: 10s
@@ -63,7 +63,7 @@
max-file: '1'
healthcheck:
test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')", "--quiet"] # ping the mongo server
interval: 30s
interval: 5m
retries: 3
start_period: 30s
timeout: 10s
@@ -74,6 +74,8 @@
image: mongo:6.0.17
labels:
- "com.centurylinklabs.watchtower.enable=true" # only services with this tag will be updated by watchtower
- "vector.mongo=true" # enable logging as a provider
- "vector.docker=true" # log docker events
restart: unless-stopped # unless the container has been stopped, it will be restarted, even on reboot
volumes:
- /data/db:/data/db
@@ -90,7 +90,7 @@
max-file: '1'
healthcheck:
test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')", "--quiet"] # ping the mongo server
interval: 30s
interval: 5m
retries: 3
start_period: 30s
timeout: 10s
@@ -103,6 +103,8 @@
- ../.env.${NODE_ENV}
labels:
- "com.centurylinklabs.watchtower.enable=true" # only services with this tag will be updated by watchtower
- "vector.caddy=true" # enable logging as caddy
- "vector.docker=true" # log docker events
restart: unless-stopped # unless the container has been stopped, it will be restarted, even on reboot
ports:
- '80:80'
@@ -121,7 +121,7 @@
max-file: '1'
healthcheck:
test: ["CMD", "curl", "--fail", "localhost:2019/metrics"] # ping the caddy admin api
interval: 30s
interval: 5m
retries: 3
start_period: 30s
timeout: 10s
@@ -130,8 +130,11 @@
- production
- staging
image: containrrr/watchtower
command: ["watchtower", "--log-format", "JSON", "--remove-volumes", "--cleanup", "--warn-on-head-failure", "never", "--label-enable", "--interval", "30"]
labels:
- "com.centurylinklabs.watchtower.enable=true" # only services with this tag will be updated by watchtower
- "vector.watchtower=true" # enable logging as a watchtower
- "vector.docker=true" # log docker events
restart: unless-stopped # unless the container has been stopped, it will be restarted, even on reboot
env_file:
- ../.env.${NODE_ENV}
@@ -142,6 +142,34 @@
options:
max-size: '100m'
max-file: '1'
vector:
profiles:
- production
- staging
image: prosopo/vector:${COMPOSE_PROVIDER_IMAGE_VERSION}
env_file:
- ../.env.${NODE_ENV}
labels:
- "com.centurylinklabs.watchtower.enable=true" # only services with this tag will be updated by watchtower
- "vector.docker=true" # log docker events
restart: unless-stopped # unless the container has been stopped, it will be restarted, even on reboot
volumes:
- ./provider.vector.toml:/etc/vector/vector.toml
- /var/run/docker.sock:/var/run/docker.sock # needed for monitoring docker container events, e.g. start/stop/etc
networks:
- internal
- external
logging:
driver: 'json-file'
options:
max-size: '100m'
max-file: '1'
healthcheck:
test: ["CMD", "curl", "--fail", "localhost:8686/health"]
interval: 30s
timeout: 10s
retries: 1
start_period: 10s
networks:
internal:
name: internal
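The compose changes add a vector log-shipping service, vector.* labels on each service it should collect from, and less aggressive healthcheck intervals for the long-running containers. A rough sketch of bringing the stack up and re-running the vector healthcheck by hand (the paths, the production profile, and the pre-exported variables are assumptions for illustration, not prescribed by this PR):

# NODE_ENV and COMPOSE_PROVIDER_IMAGE_VERSION are assumed to be exported already
docker compose -f docker/docker-compose.provider.yml --profile production up -d
docker compose -f docker/docker-compose.provider.yml --profile production ps
# same probe the compose healthcheck runs, executed inside the vector container
docker compose -f docker/docker-compose.provider.yml --profile production exec vector curl --fail localhost:8686/health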
32 changes: 32 additions & 0 deletions docker/images/vector/Dockerfile
@@ -0,0 +1,32 @@
# Image to substitute env vars into vector.toml and start vector

# docker run -it --rm --env-file vector.env -v /home/geopro/bench/captcha5/vector.toml:/etc/vector/vector.toml prosopo/vector:latest

FROM timberio/vector:latest-debian

RUN apt-get update && apt-get install -y --no-install-recommends \
gettext \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

COPY vector.toml /etc/vector/vector.toml

# script for substituting env vars into the vector config
RUN cat <<EOF > /main.sh
#! /bin/bash

# exit on error, treat unset variables as errors, and echo each command
set -eux

# substitute the env vars into the template toml config for vector
envsubst < /etc/vector/vector.toml > /etc/vector/vector-filled.toml

cat /etc/vector/vector-filled.toml

# start vector
vector --config /etc/vector/vector-filled.toml
EOF

RUN chmod +x /main.sh

ENTRYPOINT ["/main.sh"]
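The image is just the upstream vector image plus gettext, so envsubst can fill the OpenObserve settings into the config at start-up. A minimal local build-and-run sketch (the build context path and the vector.env file name are assumptions for illustration):

# build from the directory added in this PR
docker build -t prosopo/vector:latest docker/images/vector
# vector.env is a hypothetical env file providing OO_HOST, OO_ORG, OO_USERNAME, OO_PASSWORD and NODE_ENV
docker run -it --rm --env-file vector.env prosopo/vector:latest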
188 changes: 188 additions & 0 deletions docker/images/vector/vector.toml
@@ -0,0 +1,188 @@
# enable vector's api
[api]
enabled = true
# only allow localhost to hit the api
address = "127.0.0.1:8686"

# take logs from docker containers with the correct label set
[sources.provider]
type = "docker_logs"
include_labels = ["vector.provider=true"]

# format the logs for a provider
[transforms.provider_format]
type = "remap"
inputs = ["provider"]
source = '''
# drop the docker_logs metadata first, then tag the event with this machine's host
del(.container_created_at)
del(.container_id)
del(.host)
del(.image)
del(.label)
del(.source_type)
del(.stream)
del(.timestamp)
.host="$OO_HOST"
'''

# print the provider logs to the console (left disabled)
#[sinks.console]
#type = "console"
#inputs = ["provider_format"]
#encoding.codec = "json"

# send provider logs to openobserve
[sinks.openobserve_provider]
type = "http"
inputs = ["provider_format"]
uri = "https://api.openobserve.ai/api/$OO_ORG/${NODE_ENV}_provider_node/_json"
method = "post"
auth.strategy = "basic"
auth.user = "$OO_USERNAME"
auth.password = "$OO_PASSWORD"
compression = "gzip"
encoding.codec = "json"
encoding.timestamp_format = "rfc3339"
healthcheck.enabled = false

# listen to docker events for containers starting, stopping, etc.
[sources.docker]
type = "exec"
# only containers with the correct label are listened to
# https://docs.docker.com/reference/cli/docker/system/events/#containers
# start – Indicates that the container has been started.
# stop – Indicates that the container has been stopped.
# restart – Indicates that the container has been restarted.
# die – Indicates that the container has stopped running (either due to a crash or intentional termination).
# pause – Indicates that the container is paused, meaning it is not actively running but hasn't been stopped.
# unpause – Indicates that the container has resumed running after being paused.
# kill – Indicates that the container has been forcefully terminated.
# oom – Indicates that the container was terminated due to running out of memory (Out-Of-Memory).
command = [ "docker", "events", "--format", "json", "--filter", "label=vector.docker=true", "--filter", "event=start", "--filter", "event=stop", "--filter", "event=restart", "--filter", "event=die", "--filter", "event=pause", "--filter", "event=unpause", "--filter", "event=kill", "--filter", "event=oom" ]
mode = "streaming"
decoding.codec = "json"

# delete useless info and add the host to identify what machine the logs are coming from
[transforms.docker_format]
type = "remap"
inputs = ["docker"]
source = '''
# keep the container name, drop the remaining event metadata, then tag with this machine's host
.name=.Actor.Attributes.name
del(.id)
del(.from)
del(.host)
del(.Type)
del(.Actor)
del(.scope)
del(.time)
del(.timeNano)
del(.source_type)
del(.timestamp)
.host="$OO_HOST"
'''

# print docker events to console
[sinks.console]
type = "console"
inputs = ["docker_format"]
encoding.codec = "json"

# send to openobserve
[sinks.openobserve_docker]
type = "http"
inputs = ["docker_format"]
uri = "https://api.openobserve.ai/api/$OO_ORG/${NODE_ENV}_provider_docker/_json"
method = "post"
auth.strategy = "basic"
auth.user = "$OO_USERNAME"
auth.password = "$OO_PASSWORD"
compression = "gzip"
encoding.codec = "json"
encoding.timestamp_format = "rfc3339"
healthcheck.enabled = false

[sources.caddy]
type = "docker_logs"
include_labels = ["vector.caddy=true"]

[transforms.caddy_format]
type = "remap"
inputs = ["caddy"]
source = '''
.=parse_json!(string!(.message))
.host="$OO_HOST"
del(.ts)
del(.user_id)
del(.size)
del(.message)
del(.resp_headers)
'''

# send to openobserve
[sinks.openobserve_caddy]
type = "http"
inputs = ["caddy_format"]
uri = "https://api.openobserve.ai/api/$OO_ORG/${NODE_ENV}_provider_caddy/_json"
method = "post"
auth.strategy = "basic"
auth.user = "$OO_USERNAME"
auth.password = "$OO_PASSWORD"
compression = "gzip"
encoding.codec = "json"
encoding.timestamp_format = "rfc3339"
healthcheck.enabled = false

[sources.mongo]
type = "docker_logs"
include_labels = ["vector.mongo=true"]

[transforms.mongo_format]
type = "remap"
inputs = ["mongo"]
source = '''
.=parse_json!(string!(.message))
.host="$OO_HOST"
del(.t)
del(.id)
'''

# send to openobserve
[sinks.openobserve_mongo]
type = "http"
inputs = ["mongo_format"]
uri = "https://api.openobserve.ai/api/$OO_ORG/${NODE_ENV}_provider_mongo/_json"
method = "post"
auth.strategy = "basic"
auth.user = "$OO_USERNAME"
auth.password = "$OO_PASSWORD"
compression = "gzip"
encoding.codec = "json"
encoding.timestamp_format = "rfc3339"
healthcheck.enabled = false

[sources.watchtower]
type = "docker_logs"
include_labels = ["vector.watchtower=true"]

[transforms.watchtower_format]
type = "remap"
inputs = ["watchtower"]
source = '''
.=parse_json!(string!(.message))
.host="$OO_HOST"
del(.time)
'''

# send to openobserve
[sinks.openobserve_watchtower]
type = "http"
inputs = ["watchtower_format"]
uri = "https://api.openobserve.ai/api/$OO_ORG/${NODE_ENV}_provider_watchtower/_json"
method = "post"
auth.strategy = "basic"
auth.user = "$OO_USERNAME"
auth.password = "$OO_PASSWORD"
compression = "gzip"
encoding.codec = "json"
encoding.timestamp_format = "rfc3339"
healthcheck.enabled = false
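Since the config is a template, the simplest way to check it is to do what /main.sh does: substitute the variables and then validate the result. A sketch, assuming the vector CLI and gettext's envsubst are installed locally, with placeholder values standing in for the real credentials from ../.env.${NODE_ENV}:

# hypothetical placeholder values, for validation only
export OO_HOST=provider-1 OO_ORG=example-org OO_USERNAME=user@example.com OO_PASSWORD=changeme NODE_ENV=staging
envsubst < docker/images/vector/vector.toml > /tmp/vector-filled.toml
vector validate /tmp/vector-filled.toml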
9 changes: 5 additions & 4 deletions docker/provider.Caddyfile
@@ -3,6 +3,11 @@
http_port {$CADDY_HTTP_PORT:80}
auto_https {$CADDY_AUTO_HTTPS:disable_redirects}
admin {$CADDY_ADMIN_API::2020} # set the admin api to run on localhost:2020 (default is 2019 which can conflict with caddy daemon)

# default all Caddy logging to JSON on stdout
log {
output stdout
format json
}
}

{$CADDY_DOMAIN} {
@@ -52,8 +57,4 @@
log {
output file /var/log/caddy/{$CADDY_DOMAIN}.log
}

log {
output stdout
}
}
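With the global log block above, Caddy emits its logs as JSON on stdout, which is what the caddy_format transform in vector.toml expects when it runs parse_json! over the docker log stream. A quick spot-check (assuming a recent docker compose that supports these flags):

# the last caddy log line should be a single JSON object
docker compose -f docker/docker-compose.provider.yml --profile production logs --no-log-prefix --tail 1 caddy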