Skip to content

Commit

Permalink
Some linting, adding github workflow, other fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
mentics committed Apr 19, 2024
1 parent 47a00e2 commit d9590c4
Show file tree
Hide file tree
Showing 19 changed files with 243 additions and 93 deletions.
30 changes: 30 additions & 0 deletions .github/workflows/structurizr-cli.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# Uploads docs/structurizr.dsl to the Structurizr cloud workspace whenever
# it changes on main. Workspace id/key/secret come from repository secrets.
name: upload-structurizr  # fixed typo: was "upload-sctructurizr"

on:
  push:
    branches:
      - 'main'
    paths:
      - 'docs/structurizr.dsl'

jobs:
  structurizr-cli:
    runs-on: ubuntu-latest
    name: Run structurizr-cli
    steps:
      - name: Checkout
        # v2 runs on deprecated node12 runners; v4 is the supported release.
        uses: actions/checkout@v4

      - name: Run structurizr-cli action
        uses: aidmax/structurizr-cli-action@v1
        id: test
        with:
          id: ${{ secrets.structurizr_workspace_id }}
          key: ${{ secrets.structurizr_api_key }}
          secret: ${{ secrets.structurizr_api_secret }}
          workspace: docs/structurizr.dsl

          # optional parameters
          # NOTE(review): if the structurizr_api_url secret is unset this
          # expands to an empty string — presumably the action falls back to
          # the cloud URL; confirm against the action's README.
          url: ${{ secrets.structurizr_api_url }}
          merge: false
          archive: false
17 changes: 0 additions & 17 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,20 +18,3 @@ For running commands or troubleshooting, you can set the vars in your current sh
# Notes:

I specified t4g.small for size because they are free during 2024. Also, oddly enough, the on-demand t4g.small's are free, but it seems I was charged a few cents when running them as spot instances. So, these scripts are currently not running spot instances until that changes.



CertBot:

sudo dnf install certbot
systemctl start certbot-renew.timer

# TODO: can maybe automate with a hook script later
sudo certbot certonly --manual --authenticator manual --preferred-challenges dns --debug-challenges -d \*.mentics.com -d mentics.com -v
sudo cat /etc/letsencrypt/live/mentics.com/fullchain.pem /etc/letsencrypt/live/mentics.com/privkey.pem | sudo dd of=/etc/haproxy/site.pem


# cd /etc/letsencrypt
# sudo wget https://github.com/joohoi/acme-dns-certbot-joohoi/raw/master/acme-dns-auth.py
# sudo chmod 700
# sudo certbot certonly --manual --manual-auth-hook /etc/letsencrypt/acme-dns-auth.py --preferred-challenges dns --debug-challenges -d \*.mentics.com -d mentics.com -v
87 changes: 87 additions & 0 deletions docs/structurizr.dsl
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
# Structurizr C4 model for the data pipeline: ingest -> redpanda topics ->
# predict/label/train, with ScyllaDB for outputs and a web UI for reporting.
workspace {
    # !identifiers hierarchical

    model {
        user_monitor = Person "Administrator"
        user_prediction = Person "User of Predict"

        external_data = SoftwareSystem "External Data" "Source of data coming into the system."

        softwareSystem = SoftwareSystem "Software System" "My software system." {
            ingest = Container "Ingest" {
                monitor = Component "Monitor"
            }
            predict = Container "Predict"
            label = Container "Label"
            train = Container "Train"
            ui = Container "Web UI"

            redpanda = Container "RedPanda" {
                series_raw = Component "Raw Topic"
                series_features = Component "Features Topic"
                series_actual = Component "Actual Topic"
            }
            scylladb = Container "ScyllaDB" {
                prediction = Component "Prediction"
                summary = Component "Reporting Data"
            }

            models = Container "Model Store"

            external_data -> ingest "read websocket stream"
            ingest -> series_raw "write stream raw"
            ingest -> series_features "write stream feature"
            series_features -> predict "read stream feature window"
            predict -> prediction "write predict"
            series_raw -> label "read stream raw"
            label -> series_actual "write stream actual"
            series_actual -> train "read stream actual" # trigger
            redpanda -> train "read predict"
            models -> train "read model versions"
            train -> models "write updated model versions"
        }

        monitor -> user_monitor "monitors"
        # FIX: relationship destination must be an identifier — a quoted
        # "ui" would parse as the description, leaving no destination.
        summary -> ui "read reporting data"
        ui -> user_prediction "reports"

        # deploymentEnvironment "Production" {
        #     deploymentNode {
        #     }
        # }
    }

    views {
        # systemContext SoftwareSystem "SystemContext" {
        #     include *
        #     autoLayout
        # }

        styles {
            element "Element" {
                background #1168bd
                color #ffffff
                shape RoundedBox
            }
        }

        # FIX: 'themes' is a views-level keyword; it was previously nested
        # inside the styles block.
        themes https://static.structurizr.com/themes/amazon-web-services-2023.01.31/theme.json https://static.structurizr.com/themes/kubernetes-v0.3/theme.json
    }
}
12 changes: 7 additions & 5 deletions haproxy/connect.sh
Original file line number Diff line number Diff line change
@@ -1,25 +1,27 @@
#!/bin/bash
# Connect to the HAProxy EC2 instance over ssh, or copy files to/from it.
#
# Usage:
#   connect.sh                    open an interactive shell
#   connect.sh '<command>'        run a single remote command
#   connect.sh scp SRC DST       copy local SRC to remote DST
#   connect.sh pcs SRC DST       copy remote SRC to local DST
#
# NOTE: this span contained interleaved pre/post-commit diff lines; resolved
# here to the post-commit (quoted) versions only, so no command runs twice.

# Run relative to this script's directory; bail out if that fails.
cd "$(dirname "${BASH_SOURCE[0]}")" || exit

if [ -f "../out/lb-ip.txt" ]; then
    echo "Using dns name from out/lb-ip.txt"
    PUBLIC_DNS=$(cat ../out/lb-ip.txt)
else
    # Look up the instance tagged Name=HAProxy and cache its public DNS name.
    res=$(aws ec2 describe-instances --filters 'Name=tag:Name,Values=["HAProxy"]' --query "Reservations[*].Instances[*].[PublicDnsName]" --output text)
    # Strip spaces and newlines from the aws CLI text output.
    PUBLIC_DNS=${res//[ $'\n']/}
    echo "${PUBLIC_DNS}" > ../out/lb-ip.txt
    echo "Connecting to ${PUBLIC_DNS}"
fi

if [ -n "$1" ]; then
    if [ "$1" == 'scp' ]; then
        scp -r -i ~/.ssh/awsec2.pem "$2" "ec2-user@${PUBLIC_DNS}:$3"
    elif [ "$1" == 'pcs' ]; then
        scp -r -i ~/.ssh/awsec2.pem "ec2-user@${PUBLIC_DNS}:$2" "$3"
    else
        # -t forces a tty so interactive remote commands (e.g. sudo) work.
        ssh -t -i ~/.ssh/awsec2.pem ec2-user@"${PUBLIC_DNS}" "$1"
    fi
else
    ssh -i ~/.ssh/awsec2.pem ec2-user@"${PUBLIC_DNS}"
fi

File renamed without changes.
6 changes: 3 additions & 3 deletions haproxy/make-conf.sh → haproxy/make-haproxy.sh
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ if [ -z "$1" ]; then
exit 1
fi
OUTPUT_PATH="$(pwd)/$1"
echo $OUTPUT_PATH
echo "$OUTPUT_PATH"

set -e
cd "$(dirname "${BASH_SOURCE[0]}")"
Expand All @@ -27,5 +27,5 @@ echo "Getting instance list for auto scaling group ${CONTROL_ASG_ID}"
control_refs=$(aws ec2 describe-instances --filters "Name=tag:aws:autoscaling:groupName,Values=${CONTROL_ASG_ID}" --query "Reservations[*].Instances[*].[PrivateDnsName]" --output text)
echo "Found instances: ${control_refs}"

cd ${WORK_DIR}
./gen-conf.sh "${control_refs}" "${nodes_refs}" > "${OUTPUT_PATH}"
cd "${WORK_DIR}"
./gen-haproxy.sh "${control_refs}" "${nodes_refs}" > "${OUTPUT_PATH}"
9 changes: 7 additions & 2 deletions haproxy/setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

# TODO: when implement auto match auto scaling group, may need to save state: https://stackoverflow.com/a/36750445/315734

cd "$(dirname "${BASH_SOURCE[0]}")"
cd "$(dirname "${BASH_SOURCE[0]}")" || exit

haproxy_local="../out/haproxy.cfg"
haproxy_remote="/home/ec2-user/haproxy.cfg"
Expand All @@ -12,11 +12,15 @@ nginx_remote="/home/ec2-user/nginx.conf"
site_local="site"
site_remote="/home/ec2-user/"

./make-conf.sh ${haproxy_local}
pem_local="$HOME/.site.pem"
pem_remote="/home/ec2-user/site.pem"

./make-haproxy.sh ${haproxy_local}

./connect.sh scp ${haproxy_local} ${haproxy_remote}
./connect.sh scp ${nginx_local} ${nginx_remote}
./connect.sh scp ${site_local} ${site_remote}
./connect.sh scp ${pem_local} ${pem_remote}

./connect.sh <<EOT
sudo dnf update
Expand All @@ -27,5 +31,6 @@ sudo mkdir -p /srv/www/default && sudo cp -r ${site_remote}/site/* /srv/www/defa
sudo systemctl restart nginx
sudo cp ${haproxy_remote} /etc/haproxy/haproxy.cfg
sudo cp ${pem_remote} /etc/haproxy/site.pem
sudo systemctl restart haproxy
EOT
2 changes: 1 addition & 1 deletion haproxy/update-haproxy.sh
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
haproxy_local="../out/haproxy.cfg"
haproxy_remote="/home/ec2-user/haproxy.cfg"

./make-conf.sh ${haproxy_local}
./make-haproxy.sh ${haproxy_local}

./connect.sh scp ${haproxy_local} ${haproxy_remote}
./connect.sh <<EOT
Expand Down
16 changes: 8 additions & 8 deletions kops/cleanup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

# TODO: if we use the spot option where kops resources are created, delete them here.
echo "Deleting kops resource cluster ${CLUSTER_NAME}"
kops delete cluster --name ${CLUSTER_NAME} --yes
kops delete cluster --name "${CLUSTER_NAME}" --yes

# Delete the kops state and oidc buckets
delete_bucket() {
Expand All @@ -11,19 +11,19 @@ delete_bucket() {

# TODO: put if statement around the subcommand to avoid error if it returns nothing
aws s3api delete-objects --no-cli-pager \
--bucket ${DELETE} \
--delete "$(aws s3api list-object-versions --bucket ${DELETE} --query='{Objects: Versions[].{Key:Key,VersionId:VersionId}}')"
--bucket "${DELETE}" \
--delete "$(aws s3api list-object-versions --bucket "${DELETE}" --query='{Objects: Versions[].{Key:Key,VersionId:VersionId}}')"

# TODO: put if statement around the subcommand to avoid error if it returns nothing
aws s3api delete-objects --no-cli-pager \
--bucket ${DELETE} \
--delete "$(aws s3api list-object-versions --bucket ${DELETE} --query='{Objects: DeleteMarkers[].{Key:Key,VersionId:VersionId}}')"
--bucket "${DELETE}" \
--delete "$(aws s3api list-object-versions --bucket "${DELETE}" --query='{Objects: DeleteMarkers[].{Key:Key,VersionId:VersionId}}')"

aws s3api delete-bucket --no-cli-pager --bucket ${DELETE}
aws s3api delete-bucket --no-cli-pager --bucket "${DELETE}"
}

delete_bucket ${STATE_STORE}
delete_bucket ${OIDC_STORE}
delete_bucket "${STATE_STORE}"
delete_bucket "${OIDC_STORE}"


# cd "$(dirname "${BASH_SOURCE[0]}")"
Expand Down
46 changes: 22 additions & 24 deletions kops/gen-terraform.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,13 @@
# Make paths based on location of this script
cd "$(dirname "${BASH_SOURCE[0]}")"

kops get all --name ${CLUSTER_NAME} > /dev/null 2>&1

if [[ $? -eq 0 ]]
if kops get all --name "${CLUSTER_NAME}" > /dev/null 2>&1
then
read -p "Cluster config already exists in kops, delete? [yN]" -n 1 -r
echo # (optional) move to a new line
if [[ $REPLY =~ ^[Yy]$ ]]
then
kops delete cluster --name ${CLUSTER_NAME} --unregister --yes
kops delete cluster --name "${CLUSTER_NAME}" --unregister --yes
else
exit 1
fi
Expand All @@ -23,29 +21,29 @@ set -e
echo "Generating cluster configuration in kops"
kops create cluster \
--ssh-public-key ~/.ssh/id_ed25519.pub \
--name ${CLUSTER_NAME} \
--discovery-store ${OIDC_STORE_URL} \
--state ${STATE_STORE_URL} \
--name "${CLUSTER_NAME}" \
--discovery-store "${OIDC_STORE_URL}" \
--state "${STATE_STORE_URL}" \
--cloud aws \
--zones ${ZONES} \
--zones "${ZONES}" \
--image amazon/al2023-ami-2023.4.20240401.1-kernel-6.1-arm64 \
--topology private \
--associate-public-ip=false \
--control-plane-size ${CONTROL_PLANE_SIZE} \
--control-plane-count ${CONTROL_PLANE_COUNT} \
--node-size ${NODE_SIZE} \
--node-count ${NODE_COUNT}
--control-plane-size "${CONTROL_PLANE_SIZE}" \
--control-plane-count "${CONTROL_PLANE_COUNT}" \
--node-size "${NODE_SIZE}" \
--node-count "${NODE_COUNT}"

echo "Making some modifications to default config"
kops edit ig control-plane-us-west-2b --name ${CLUSTER_NAME} --set "spec.rootVolume.size=8"
kops edit ig control-plane-us-west-2b --name ${CLUSTER_NAME} --set "spec.rootVolume.encryption=false"
kops edit ig nodes-us-west-2b --name ${CLUSTER_NAME} --set "spec.rootVolume.size=8"
kops edit ig nodes-us-west-2b --name ${CLUSTER_NAME} --set "spec.rootVolume.encryption=false"
kops edit cluster --name ${CLUSTER_NAME} --set "spec.etcdClusters[0].etcdMembers[0].volumeSize=8"
kops edit cluster --name ${CLUSTER_NAME} --set "spec.etcdClusters[0].etcdMembers[0].encryptedVolume=false"
kops edit cluster --name ${CLUSTER_NAME} --set "spec.etcdClusters[1].etcdMembers[0].volumeSize=8"
kops edit cluster --name ${CLUSTER_NAME} --set "spec.etcdClusters[1].etcdMembers[0].encryptedVolume=false"
kops edit cluster --name ${CLUSTER_NAME} --unset "spec.api.loadBalancer"
kops edit ig control-plane-us-west-2b --name "${CLUSTER_NAME}" --set "spec.rootVolume.size=8"
kops edit ig control-plane-us-west-2b --name "${CLUSTER_NAME}" --set "spec.rootVolume.encryption=false"
kops edit ig nodes-us-west-2b --name "${CLUSTER_NAME}" --set "spec.rootVolume.size=8"
kops edit ig nodes-us-west-2b --name "${CLUSTER_NAME}" --set "spec.rootVolume.encryption=false"
kops edit cluster --name "${CLUSTER_NAME}" --set "spec.etcdClusters[0].etcdMembers[0].volumeSize=8"
kops edit cluster --name "${CLUSTER_NAME}" --set "spec.etcdClusters[0].etcdMembers[0].encryptedVolume=false"
kops edit cluster --name "${CLUSTER_NAME}" --set "spec.etcdClusters[1].etcdMembers[0].volumeSize=8"
kops edit cluster --name "${CLUSTER_NAME}" --set "spec.etcdClusters[1].etcdMembers[0].encryptedVolume=false"
kops edit cluster --name "${CLUSTER_NAME}" --unset "spec.api.loadBalancer"

# If you want to use spot instances, comment the line with target=terraform above, and uncomment the following:
# kops edit ig control-plane-us-west-2b \
Expand All @@ -66,13 +64,13 @@ kops edit cluster --name ${CLUSTER_NAME} --unset "spec.api.loadBalancer"

echo "Exporting kops cluster config to terraform"
kops update cluster \
--name ${CLUSTER_NAME} \
--state ${STATE_STORE_URL} \
--name "${CLUSTER_NAME}" \
--state "${STATE_STORE_URL}" \
--admin \
--target=terraform

echo "Exporting kops cluster config out/kops-${CLUSTER_NAME}.yaml"
kops get all ${CLUSTER_NAME} -o yaml > out/kops-${CLUSTER_NAME}.yaml
kops get all "${CLUSTER_NAME}" -o yaml > out/kops-"${CLUSTER_NAME}".yaml

rm -rf ../out
mv out ..
Expand Down
2 changes: 1 addition & 1 deletion kops/kubectl-context.sh
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
#!/bin/bash
# Point kubectl at the kops-managed cluster by exporting its kubeconfig.
# The span showed both the old (unquoted) and new (quoted) command; only the
# quoted post-commit line is kept so the export runs once.

# Fail fast with a clear message instead of passing --name "" to kops.
: "${CLUSTER_NAME:?CLUSTER_NAME must be set}"

kops export kubeconfig --name "${CLUSTER_NAME}"
10 changes: 5 additions & 5 deletions kops/state-store.sh
Original file line number Diff line number Diff line change
Expand Up @@ -10,20 +10,20 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
echo "Creating state store in S3: ${STATE_STORE_URL}"

aws s3api create-bucket --no-cli-pager \
--bucket ${STATE_STORE} \
--bucket "${STATE_STORE}" \
--region us-east-1
aws s3api put-bucket-versioning --no-cli-pager --bucket ${STATE_STORE} --versioning-configuration Status=Enabled
aws s3api put-bucket-versioning --no-cli-pager --bucket "${STATE_STORE}" --versioning-configuration Status=Enabled


echo "Creating OIDC store in S3: ${OIDC_STORE_URL}"

aws s3api create-bucket --no-cli-pager \
--bucket ${OIDC_STORE} \
--bucket "${OIDC_STORE}" \
--region us-east-1 \
--object-ownership BucketOwnerPreferred
aws s3api put-public-access-block --no-cli-pager \
--bucket ${OIDC_STORE} \
--bucket "${OIDC_STORE}" \
--public-access-block-configuration BlockPublicAcls=false,IgnorePublicAcls=false,BlockPublicPolicy=false,RestrictPublicBuckets=false
aws s3api put-bucket-acl --no-cli-pager \
--bucket ${OIDC_STORE} \
--bucket "${OIDC_STORE}" \
--acl public-read
Loading

0 comments on commit d9590c4

Please sign in to comment.