Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

refactor: split resources by runner type #1239

Merged
merged 4 commits
Feb 9, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
87 changes: 23 additions & 64 deletions docker_autoscaler.tf
Original file line number Diff line number Diff line change
Expand Up @@ -3,70 +3,6 @@
# outdated docker+machine driver. The docker+machine driver is a legacy driver that is no longer maintained by GitLab.
#

########################################
###### Security Group and SG rules #####
########################################

# Base security group
# Security group attached to docker-autoscaler worker instances. Created only
# when this module is configured with the "docker-autoscaler" worker type
# (count drops to 0 otherwise, and every dependent rule below disappears too).
resource "aws_security_group" "docker_autoscaler" {
count = var.runner_worker.type == "docker-autoscaler" ? 1 : 0
name_prefix = "${local.name_sg}-docker-autoscaler"
vpc_id = var.vpc_id
description = "Docker-autoscaler security group"

tags = merge(
local.tags,
{
# NOTE(review): the Name tag uses local.name_sg without the
# "-docker-autoscaler" suffix applied to name_prefix — confirm intentional.
"Name" = format("%s", local.name_sg)
},
)
}

# Ingress rules
# One ingress rule per entry of var.runner_worker_ingress_rules; the map is
# emptied (no rules) unless the worker type is "docker-autoscaler".
resource "aws_vpc_security_group_ingress_rule" "docker_autoscaler_ingress" {
for_each = var.runner_worker.type == "docker-autoscaler" ? var.runner_worker_ingress_rules : {}

security_group_id = aws_security_group.docker_autoscaler[0].id

from_port = each.value.from_port
to_port = each.value.to_port
ip_protocol = each.value.protocol

# Exactly one of prefix list / security group / IPv4 / IPv6 source is
# expected per rule; unset members pass through as null.
description = each.value.description
prefix_list_id = each.value.prefix_list_id
referenced_security_group_id = each.value.security_group
cidr_ipv4 = each.value.cidr_block
cidr_ipv6 = each.value.ipv6_cidr_block
}

# Allow all traffic (from/to_port = -1, ip_protocol = "-1") originating from
# the Runner Manager's security group into the docker-autoscaler workers.
resource "aws_vpc_security_group_ingress_rule" "docker_autoscaler_internal_traffic" {
count = var.runner_worker.type == "docker-autoscaler" ? 1 : 0

security_group_id = aws_security_group.docker_autoscaler[0].id
from_port = -1
to_port = -1
ip_protocol = "-1"
description = "Allow ALL Ingress traffic between Runner Manager and Docker-autoscaler workers security group"
referenced_security_group_id = aws_security_group.runner.id
}

# Egress rules
# One egress rule per entry of var.runner_worker_egress_rules; mirrors the
# ingress resource above and is likewise gated on the worker type.
resource "aws_vpc_security_group_egress_rule" "docker_autoscaler_egress" {
for_each = var.runner_worker.type == "docker-autoscaler" ? var.runner_worker_egress_rules : {}

security_group_id = aws_security_group.docker_autoscaler[0].id

from_port = each.value.from_port
to_port = each.value.to_port
ip_protocol = each.value.protocol

description = each.value.description
prefix_list_id = each.value.prefix_list_id
referenced_security_group_id = each.value.security_group
cidr_ipv4 = each.value.cidr_block
cidr_ipv6 = each.value.ipv6_cidr_block
}

####################################
###### Launch template Workers #####
####################################
Expand Down Expand Up @@ -215,3 +151,26 @@ resource "aws_autoscaling_group" "autoscaler" {
]
}
}

# Instance profile wrapping the docker-autoscaler IAM role so it can be
# attached to the worker EC2 instances launched by the autoscaling group.
resource "aws_iam_instance_profile" "docker_autoscaler" {
count = var.runner_worker.type == "docker-autoscaler" ? 1 : 0
name = "${local.name_iam_objects}-docker-autoscaler"
role = aws_iam_role.docker_autoscaler[0].name
tags = local.tags
}

# 4096-bit RSA key pair generated by Terraform for the autoscaler workers.
# NOTE(review): the private key material lives in Terraform state — make sure
# the state backend is encrypted / access-controlled.
resource "tls_private_key" "autoscaler" {
count = var.runner_worker.type == "docker-autoscaler" ? 1 : 0

algorithm = "RSA"
rsa_bits = 4096
}

# EC2 key pair registered from the generated private key above. The key name
# is prefixed with the environment to keep it unique per deployment.
resource "aws_key_pair" "autoscaler" {
count = var.runner_worker.type == "docker-autoscaler" ? 1 : 0

key_name = "${var.environment}-${var.runner_worker_docker_autoscaler.key_pair_name}"
public_key = tls_private_key.autoscaler[0].public_key_openssh

tags = local.tags
}
47 changes: 47 additions & 0 deletions docker_autoscaler_policy.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
# IAM role assumed by docker-autoscaler worker instances. A caller-supplied
# assume-role policy document takes precedence; otherwise the module's EC2
# instance trust-policy template is rendered. An optional permissions boundary
# is applied when var.iam_permissions_boundary is non-empty.
resource "aws_iam_role" "docker_autoscaler" {
count = var.runner_worker.type == "docker-autoscaler" ? 1 : 0
name = "${local.name_iam_objects}-docker-autoscaler"
assume_role_policy = length(var.runner_worker_docker_autoscaler_role.assume_role_policy_json) > 0 ? var.runner_worker_docker_autoscaler_role.assume_role_policy_json : templatefile("${path.module}/policies/instance-role-trust-policy.json", {})
permissions_boundary = var.iam_permissions_boundary == "" ? null : "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/${var.iam_permissions_boundary}"

tags = merge(local.tags, var.runner_worker_docker_autoscaler_role.additional_tags)
}

# Policy rendered from the module template with the worker autoscaling
# group's ARN/name, scoping the permissions to this specific ASG.
resource "aws_iam_policy" "instance_docker_autoscaler_policy" {
count = var.runner_worker.type == "docker-autoscaler" && var.runner_role.create_role_profile ? 1 : 0

name = "${local.name_iam_objects}-docker-autoscaler"
path = "/"
description = "Policy for docker autoscaler."
# see https://gitlab.com/gitlab-org/fleeting/plugins/aws#recommended-iam-policy for needed policies
policy = templatefile("${path.module}/policies/instance-docker-autoscaler-policy.json",
{
aws_region = data.aws_region.current.name
partition = data.aws_partition.current.partition
autoscaler_asg_arn = aws_autoscaling_group.autoscaler[0].arn
autoscaler_asg_name = aws_autoscaling_group.autoscaler[0].name
})

tags = local.tags
}

# Attaches the autoscaler policy to the Runner Manager's role
# (aws_iam_role.instance), not to the worker role — presumably the manager's
# fleeting plugin drives the ASG; confirm against the policy template.
resource "aws_iam_role_policy_attachment" "instance_docker_autoscaler_policy" {
count = var.runner_worker.type == "docker-autoscaler" && var.runner_role.create_role_profile ? 1 : 0

role = aws_iam_role.instance[0].name
policy_arn = aws_iam_policy.instance_docker_autoscaler_policy[0].arn
}

# Attaches any caller-supplied policy ARNs to the worker role, one
# attachment per ARN in var.runner_worker_docker_autoscaler_role.policy_arns.
resource "aws_iam_role_policy_attachment" "docker_autoscaler_user_defined_policies" {
count = var.runner_worker.type == "docker-autoscaler" ? length(var.runner_worker_docker_autoscaler_role.policy_arns) : 0

role = aws_iam_role.docker_autoscaler[0].name
policy_arn = var.runner_worker_docker_autoscaler_role.policy_arns[count.index]
}

# Grant the worker role the AWS-managed SSM core policy so the instances can
# be reached through Session Manager when SSM access is enabled.
resource "aws_iam_role_policy_attachment" "docker_autoscaler_session_manager_aws_managed" {
  count = var.runner_worker.type == "docker-autoscaler" && var.runner_worker.ssm_access ? 1 : 0

  role       = aws_iam_role.docker_autoscaler[0].name
  policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore"
}
58 changes: 58 additions & 0 deletions docker_autoscaler_security_group.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# Base security group attached to docker-autoscaler worker instances; only
# created when the worker type is "docker-autoscaler".
resource "aws_security_group" "docker_autoscaler" {
  count = var.runner_worker.type == "docker-autoscaler" ? 1 : 0

  name_prefix = "${local.name_sg}-docker-autoscaler"
  description = "Docker-autoscaler security group"
  vpc_id      = var.vpc_id

  tags = merge(
    local.tags,
    {
      # format("%s", x) on a string is the identity — use the value directly.
      "Name" = local.name_sg
    },
  )
}

# Ingress rules
# Creates one ingress rule per map entry of var.runner_worker_ingress_rules,
# gated on the "docker-autoscaler" worker type (empty map disables all rules).
resource "aws_vpc_security_group_ingress_rule" "docker_autoscaler_ingress" {
for_each = var.runner_worker.type == "docker-autoscaler" ? var.runner_worker_ingress_rules : {}

security_group_id = aws_security_group.docker_autoscaler[0].id

from_port = each.value.from_port
to_port = each.value.to_port
ip_protocol = each.value.protocol

# Source selectors; unset members of the rule object pass through as null.
description = each.value.description
prefix_list_id = each.value.prefix_list_id
referenced_security_group_id = each.value.security_group
cidr_ipv4 = each.value.cidr_block
cidr_ipv6 = each.value.ipv6_cidr_block
}

# Open all ports/protocols (from/to_port = -1, ip_protocol = "-1") for
# traffic coming from the Runner Manager's security group.
resource "aws_vpc_security_group_ingress_rule" "docker_autoscaler_internal_traffic" {
count = var.runner_worker.type == "docker-autoscaler" ? 1 : 0

security_group_id = aws_security_group.docker_autoscaler[0].id
from_port = -1
to_port = -1
ip_protocol = "-1"
description = "Allow ALL Ingress traffic between Runner Manager and Docker-autoscaler workers security group"
referenced_security_group_id = aws_security_group.runner.id
}

# Egress rules
# Creates one egress rule per map entry of var.runner_worker_egress_rules;
# structurally identical to the ingress resource above.
resource "aws_vpc_security_group_egress_rule" "docker_autoscaler_egress" {
for_each = var.runner_worker.type == "docker-autoscaler" ? var.runner_worker_egress_rules : {}

security_group_id = aws_security_group.docker_autoscaler[0].id

from_port = each.value.from_port
to_port = each.value.to_port
ip_protocol = each.value.protocol

description = each.value.description
prefix_list_id = each.value.prefix_list_id
referenced_security_group_id = each.value.security_group
cidr_ipv4 = each.value.cidr_block
cidr_ipv6 = each.value.ipv6_cidr_block
}
44 changes: 44 additions & 0 deletions docker_machine.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# Renders the [runners.machine] section of the GitLab Runner config for the
# docker+machine worker type. Ternaries guard references to resources that
# only exist when var.runner_worker.type == "docker+machine" (their count is
# 0 otherwise, so indexing [0] would fail without the guard).
locals {
template_runner_docker_machine = templatefile("${path.module}/template/runner-docker-machine-config.tftpl",
{
runners_idle_count = var.runner_worker_docker_machine_instance.idle_count
runners_idle_time = var.runner_worker_docker_machine_instance.idle_time
runners_max_builds = local.runners_max_builds_string
docker_machine_name = format("%s-%s", local.runner_tags_merged["Name"], "%s") # %s is always needed
runners_instance_types = var.runner_worker_docker_machine_instance.types
aws_region = data.aws_region.current.name
runners_aws_zone = data.aws_availability_zone.runners.name_suffix
runners_userdata = var.runner_worker_docker_machine_instance.start_script

runners_vpc_id = var.vpc_id
# Fall back to the single module-level subnet when no worker-specific
# subnets are configured.
runners_subnet_id = var.subnet_id
runners_subnet_ids = length(var.runner_worker_docker_machine_instance.subnet_ids) > 0 ? var.runner_worker_docker_machine_instance.subnet_ids : [var.subnet_id]
runners_instance_profile = var.runner_worker.type == "docker+machine" ? aws_iam_instance_profile.docker_machine[0].name : ""

runners_use_private_address_only = var.runner_worker_docker_machine_instance.private_address_only
runners_use_private_address = !var.runner_worker_docker_machine_instance.private_address_only
runners_request_spot_instance = var.runner_worker_docker_machine_instance_spot.enable
# Empty bid string when max_price is unset or explicitly "on-demand-price".
runners_spot_price_bid = var.runner_worker_docker_machine_instance_spot.max_price == "on-demand-price" || var.runner_worker_docker_machine_instance_spot.max_price == null ? "" : var.runner_worker_docker_machine_instance_spot.max_price
runners_security_group_name = var.runner_worker.type == "docker+machine" ? aws_security_group.docker_machine[0].name : ""

# Collapse doubled commas and strip a trailing comma from the tag string.
runners_tags = replace(replace(local.runner_tags_string, ",,", ","), "/,$/", "")
runners_ebs_optimized = var.runner_worker_docker_machine_instance.ebs_optimized
runners_monitoring = var.runner_worker_docker_machine_instance.monitoring
runners_iam_instance_profile_name = var.runner_worker_docker_machine_role.profile_name
runners_root_size = var.runner_worker_docker_machine_instance.root_size
runners_volume_type = var.runner_worker_docker_machine_instance.volume_type
# Explicit AMI id wins over the filter-based AMI lookup.
runners_ami = var.runner_worker.type == "docker+machine" ? (length(var.runner_worker_docker_machine_ami_id) > 0 ? var.runner_worker_docker_machine_ami_id : data.aws_ami.docker_machine_by_filter[0].id) : ""
use_fleet = var.runner_worker_docker_machine_fleet.enable
launch_template = var.runner_worker_docker_machine_fleet.enable == true ? aws_launch_template.fleet_gitlab_runner[0].name : ""
# NOTE(review): length == 1 presumably means "no options configured" for
# the serialized options string — confirm against its local definition.
docker_machine_options = length(local.docker_machine_options_string) == 1 ? "" : local.docker_machine_options_string
runners_max_growth_rate = var.runner_worker_docker_machine_instance.max_growth_rate
runners_volume_kms_key = local.kms_key_arn
})
}

# Instance profile wrapping the docker-machine IAM role; attached to the
# worker instances that docker+machine spawns.
resource "aws_iam_instance_profile" "docker_machine" {
count = var.runner_worker.type == "docker+machine" ? 1 : 0
name = "${local.name_iam_objects}-docker-machine"
role = aws_iam_role.docker_machine[0].name
tags = local.tags
}
80 changes: 80 additions & 0 deletions docker_machine_fleet.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
# EC2 key pair registered from the Terraform-generated fleet key; created
# only when the fleet feature is on and the worker type is docker+machine.
resource "aws_key_pair" "fleet" {
  count = var.runner_worker_docker_machine_fleet.enable && var.runner_worker.type == "docker+machine" ? 1 : 0

  key_name   = "${var.environment}-${var.runner_worker_docker_machine_fleet.key_pair_name}"
  public_key = tls_private_key.fleet[0].public_key_openssh

  tags = local.tags
}

# 4096-bit RSA key generated for the fleet launch template's key pair.
# NOTE(review): the private key is kept in Terraform state — ensure the state
# backend is protected.
resource "tls_private_key" "fleet" {
  count = var.runner_worker_docker_machine_fleet.enable && var.runner_worker.type == "docker+machine" ? 1 : 0

  algorithm = "RSA"
  rsa_bits  = 4096
}

# Launch template used by docker-machine's fleet mode to spawn worker
# instances. Created only when fleet mode is enabled for the docker+machine
# worker type.
resource "aws_launch_template" "fleet_gitlab_runner" {
# checkov:skip=CKV_AWS_88:User can decide to add a public IP.
# checkov:skip=CKV_AWS_79:User can decide to enable Metadata service V2. V2 is the default.
# checkov:skip=CKV_AWS_341:Hop limit is user-defined and set to 2 by default as the workload might run in a Docker container.
count = var.runner_worker_docker_machine_fleet.enable == true && var.runner_worker.type == "docker+machine" ? 1 : 0
name_prefix = "${local.name_runner_agent_instance}-worker-"

key_name = aws_key_pair.fleet[0].key_name
# Explicit AMI id takes precedence over the filter-based lookup.
image_id = length(var.runner_worker_docker_machine_ami_id) > 0 ? var.runner_worker_docker_machine_ami_id : data.aws_ami.docker_machine_by_filter[0].id
user_data = base64gzip(var.runner_worker_docker_machine_instance.start_script)
instance_type = var.runner_worker_docker_machine_instance.types[0] # overridden by the fleet request; a placeholder type is still required
update_default_version = true
ebs_optimized = var.runner_worker_docker_machine_instance.ebs_optimized
monitoring {
enabled = var.runner_worker_docker_machine_instance.monitoring
}
block_device_mappings {
device_name = var.runner_worker_docker_machine_instance.root_device_name

ebs {
volume_size = var.runner_worker_docker_machine_instance.root_size
volume_type = var.runner_worker_docker_machine_instance.volume_type
# iops only applies to gp3/io1/io2 volumes; throughput only to gp3.
iops = contains(["gp3", "io1", "io2"], var.runner_worker_docker_machine_instance.volume_type) ? var.runner_worker_docker_machine_instance.volume_iops : null
throughput = var.runner_worker_docker_machine_instance.volume_type == "gp3" ? var.runner_worker_docker_machine_instance.volume_throughput : null
encrypted = true
kms_key_id = local.kms_key_arn
}
}

iam_instance_profile {
name = aws_iam_instance_profile.docker_machine[0].name
}

network_interfaces {
security_groups = [aws_security_group.docker_machine[0].id]
associate_public_ip_address = !var.runner_worker_docker_machine_instance.private_address_only
}

# Propagate module tags to every taggable artifact the template creates.
tag_specifications {
resource_type = "instance"
tags = local.tags
}
tag_specifications {
resource_type = "volume"
tags = local.tags
}
tag_specifications {
resource_type = "network-interface"
tags = local.tags
}
# tag_specifications for spot-instances-request do not work. Instance creation fails.

tags = local.tags

metadata_options {
http_tokens = var.runner_worker_docker_machine_ec2_metadata_options.http_tokens
http_put_response_hop_limit = var.runner_worker_docker_machine_ec2_metadata_options.http_put_response_hop_limit
instance_metadata_tags = "enabled"
}

# Avoid downtime when the template is replaced: build the new one first.
lifecycle {
create_before_destroy = true
}
}
54 changes: 54 additions & 0 deletions docker_machine_policy.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
################################################################################
### docker machine instance policy
################################################################################
# IAM role assumed by the docker-machine worker instances. A caller-supplied
# assume-role policy document wins; otherwise the module's EC2 trust-policy
# template is used. An optional permissions boundary applies when
# var.iam_permissions_boundary is non-empty.
resource "aws_iam_role" "docker_machine" {
count = var.runner_worker.type == "docker+machine" ? 1 : 0
name = "${local.name_iam_objects}-docker-machine"
assume_role_policy = length(var.runner_worker_docker_machine_role.assume_role_policy_json) > 0 ? var.runner_worker_docker_machine_role.assume_role_policy_json : templatefile("${path.module}/policies/instance-role-trust-policy.json", {})
permissions_boundary = var.iam_permissions_boundary == "" ? null : "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/${var.iam_permissions_boundary}"

tags = merge(local.tags, var.runner_worker_docker_machine_role.additional_tags)
}

################################################################################
### Policies for runner agent instance to create docker machines via spot req.
###
### iam:PassRole To pass the role from the agent to the docker machine runners
################################################################################
# Policy rendered from the module template with the worker role's ARN so the
# agent may pass exactly that role to the machines it launches.
resource "aws_iam_policy" "instance_docker_machine_policy" {
count = var.runner_worker.type == "docker+machine" && var.runner_role.create_role_profile ? 1 : 0

name = "${local.name_iam_objects}-docker-machine"
path = "/"
description = "Policy for docker machine."
policy = templatefile("${path.module}/policies/instance-docker-machine-policy.json",
{
docker_machine_role_arn = aws_iam_role.docker_machine[0].arn
})

tags = local.tags
}

# Attaches the docker-machine policy to the runner agent's role
# (aws_iam_role.instance), matching the banner above: the agent needs these
# permissions to create machines and pass the worker role to them.
resource "aws_iam_role_policy_attachment" "instance_docker_machine_policy" {
count = var.runner_worker.type == "docker+machine" && var.runner_role.create_role_profile ? 1 : 0

role = aws_iam_role.instance[0].name
policy_arn = aws_iam_policy.instance_docker_machine_policy[0].arn
}

################################################################################
### Add user defined policies
################################################################################
# One attachment per caller-supplied policy ARN, bound to the worker role.
resource "aws_iam_role_policy_attachment" "docker_machine_user_defined_policies" {
count = var.runner_worker.type == "docker+machine" ? length(var.runner_worker_docker_machine_role.policy_arns) : 0

role = aws_iam_role.docker_machine[0].name
policy_arn = var.runner_worker_docker_machine_role.policy_arns[count.index]
}

# Grant the docker-machine worker role the AWS-managed SSM core policy so
# the instances are reachable via Session Manager when SSM access is enabled.
resource "aws_iam_role_policy_attachment" "docker_machine_session_manager_aws_managed" {
  count = var.runner_worker.type == "docker+machine" && var.runner_worker.ssm_access ? 1 : 0

  role       = aws_iam_role.docker_machine[0].name
  policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore"
}
Loading