Skip to content

Commit

Permalink
Merge pull request securefederatedai#1140 from MasterSkepticista/dockerize_gramine
Browse files Browse the repository at this point in the history

Gramine-SGX Container TEE support
  • Loading branch information
MasterSkepticista authored Nov 15, 2024
2 parents 63177f4 + 9d9a624 commit 1d7c9bf
Show file tree
Hide file tree
Showing 9 changed files with 390 additions and 20 deletions.
97 changes: 97 additions & 0 deletions .github/workflows/tr_docker_gramine_direct.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
# Tests an FL experiment in a Dockerized environment.
# The aggregator and one collaborator run inside the workspace image under
# gramine-direct (Gramine without SGX hardware), exercising the TEE code path
# on a plain CI runner.
name: TaskRunner (docker/gramine-direct)

on:
  pull_request:
    branches: [ develop ]
    types: [opened, synchronize, reopened, ready_for_review]

permissions:
  contents: read

jobs:
  build:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    timeout-minutes: 10

    steps:
      - uses: actions/checkout@v3
      - name: Set up Python 3.8
        uses: actions/setup-python@v3
        with:
          python-version: "3.8"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .
      - name: Create workspace image
        run: |
          fx workspace create --prefix example_workspace --template keras_cnn_mnist
          cd example_workspace
          fx plan initialize -a localhost
          # Build the image from the exact commit under test.
          fx workspace dockerize --save --revision https://github.com/${GITHUB_REPOSITORY}.git@${{ github.event.pull_request.head.sha }}
      - name: Create certificate authority for workspace
        run: |
          cd example_workspace
          fx workspace certify
      - name: Create signed cert for collaborator
        run: |
          cd example_workspace
          fx collaborator create -d 1 -n charlie --silent
          fx collaborator generate-cert-request -n charlie --silent
          fx collaborator certify --request-pkg col_charlie_to_agg_cert_request.zip --silent

          # Pack the collaborator's private key, signed cert, and data.yaml into a tarball
          tarfiles="plan/data.yaml agg_to_col_charlie_signed_cert.zip"
          for entry in cert/client/*; do
            if [[ "$entry" == *.key ]]; then
              tarfiles="$tarfiles $entry"
            fi
          done
          tar -cf cert_col_charlie.tar $tarfiles

          # Clean up
          rm -f $tarfiles
          rm -f col_charlie_to_agg_cert_request.zip
      - name: Create signed cert for aggregator
        run: |
          cd example_workspace
          fx aggregator generate-cert-request --fqdn localhost
          fx aggregator certify --fqdn localhost --silent

          # Pack all files that aggregator needs to start training
          tar -cf cert_agg.tar plan cert save

          # Remove the directories after archiving
          rm -rf plan cert save
      - name: Load workspace image
        run: |
          cd example_workspace
          docker load -i example_workspace.tar
      - name: Run aggregator and collaborator
        run: |
          cd example_workspace
          set -x
          # Aggregator runs in the background; the workflow exits when the
          # foreground collaborator finishes.
          docker run --rm \
            --network host \
            --security-opt seccomp=unconfined \
            --mount type=bind,source=./cert_agg.tar,target=/certs.tar \
            --env KERAS_HOME=/tmp \
            example_workspace bash -c "tar -xf /certs.tar && gramine-direct fx aggregator start" &

          # TODO: Run with two collaborators instead.
          docker run --rm \
            --network host \
            --security-opt seccomp=unconfined \
            --mount type=bind,source=./cert_col_charlie.tar,target=/certs.tar \
            --env KERAS_HOME=/tmp \
            example_workspace bash -c "tar -xf /certs.tar && fx collaborator certify --import agg_to_col_charlie_signed_cert.zip && gramine-direct fx collaborator start -n charlie"
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# Tests an FL experiment in a Dockerized environment.
name: Dockerization
name: TaskRunner (docker/native)

on:
pull_request:
Expand Down
34 changes: 25 additions & 9 deletions openfl-docker/Dockerfile.base
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# ------------------------------------
# OpenFL Base Image
# OpenFL Base Image w/ Gramine support
# $> docker build . -t openfl -f Dockerfile.base [--build-arg OPENFL_REVISION=GIT_URL@COMMIT_ID]
# ------------------------------------
FROM ubuntu:22.04 AS base
Expand All @@ -15,25 +15,41 @@ RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \
apt-get update && \
apt-get install -y \
git \
curl \
python3-pip \
python3.10-dev \
python3.10-venv \
ca-certificates \
build-essential \
--no-install-recommends && \
apt-get purge -y linux-libc-dev && \
rm -rf /var/lib/apt/lists/*

# Create a python virtual environment.
RUN python3.10 -m venv /opt/venv && \
/opt/venv/bin/pip install --no-cache-dir --upgrade pip setuptools wheel
ENV PATH=/opt/venv/bin:$PATH

# Install Gramine
RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \
curl -fsSLo /usr/share/keyrings/gramine-keyring.gpg https://packages.gramineproject.io/gramine-keyring.gpg && \
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/gramine-keyring.gpg] https://packages.gramineproject.io/ jammy main" \
| tee /etc/apt/sources.list.d/gramine.list && \
curl -fsSLo /usr/share/keyrings/intel-sgx-deb.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key && \
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-sgx-deb.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu jammy main" \
| tee /etc/apt/sources.list.d/intel-sgx.list && \
apt-get update && \
apt-get install -y gramine --no-install-recommends && \
rm -rf /var/lib/apt/lists/*

# Install OpenFL.
ARG OPENFL_REVISION=https://github.com/securefederatedai/[email protected]
RUN pip install --no-cache-dir git+${OPENFL_REVISION} && \
INSTALL_SOURCES=yes /opt/venv/lib/python3.10/site-packages/openfl-docker/licenses.sh

# Create an unprivileged user.
RUN groupadd -g 1001 default && \
useradd -m -u 1001 -g default user
USER user
WORKDIR /home/user
ENV PATH=/home/user/.local/bin:$PATH

# Install OpenFL.
ARG OPENFL_REVISION=https://github.com/securefederatedai/[email protected]
RUN pip install --no-cache-dir -U pip setuptools wheel && \
pip install --no-cache-dir git+${OPENFL_REVISION} && \
INSTALL_SOURCES=yes /home/user/.local/lib/python3.10/site-packages/openfl-docker/licenses.sh

CMD ["/bin/bash"]
27 changes: 21 additions & 6 deletions openfl-docker/Dockerfile.workspace
Original file line number Diff line number Diff line change
@@ -1,18 +1,33 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# ------------------------------------
# Workspace Image
# Gramine-ready Workspace Image
# Usage:
# $> docker build . -t openfl-workspace -f Dockerfile.workspace \
# [--build-arg BASE_IMAGE=openfl:latest] \
# [--build-arg WORKSPACE_NAME=WORKSPACE_NAME] \
# [--secret id=signer-key,src=signer-key.pem]
# ------------------------------------
ARG BASE_IMAGE=openfl:latest
FROM ${BASE_IMAGE}

USER root
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

USER user
# Import workspace
WORKDIR /
ARG WORKSPACE_NAME
COPY ${WORKSPACE_NAME}.zip .
RUN fx workspace import --archive ${WORKSPACE_NAME}.zip && \
pip install --no-cache-dir -r ${WORKSPACE_NAME}/requirements.txt
COPY ${WORKSPACE_NAME}.zip /workspace.zip
RUN fx workspace import --archive /workspace.zip && \
pip install --no-cache-dir -r /workspace/requirements.txt

# Build enclaves
WORKDIR /workspace
RUN --mount=type=secret,id=signer-key,dst=/key.pem \
cp -r /opt/venv/lib/python3.10/site-packages/openfl-docker/gramine_app/* /workspace/ && \
make SGX=1 SGX_SIGNER_KEY=/key.pem >> fx.mr_enclave && \
echo "$(cat fx.mr_enclave)" && \
chown -R user /workspace

WORKDIR /home/user/${WORKSPACE_NAME}
USER user
CMD ["/bin/bash"]
89 changes: 89 additions & 0 deletions openfl-docker/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
# Using OpenFL within a Container

OpenFL can be used within a container for simulating Federated Learning experiments, or to deploy real-world experiments within Trusted Execution Environments (TEEs).

## Base Image

To develop or simulate experiments within a container, build the base image (or pull one from docker hub).

```shell
# Pull latest stable base image
$> docker pull intel/openfl

# Or, build a base image from the latest source code
$> docker build . -t openfl -f Dockerfile.base \
--build-arg OPENFL_REVISION=https://github.com/securefederatedai/openfl.git@develop
```

Run the container:
```shell
user@vm:~/openfl$ docker run -it --rm openfl:latest bash
user@7b40624c207a:/$ fx
OpenFL - Open Federated Learning

BASH COMPLETE ACTIVATION

Run in terminal:
_FX_COMPLETE=bash_source fx > ~/.fx-autocomplete.sh
source ~/.fx-autocomplete.sh
    If ~/.fx-autocomplete.sh already exists:
source ~/.fx-autocomplete.sh

CORRECT USAGE

fx [options] [command] [subcommand] [args]
```

## Deployment
This section assumes familiarity with the [TaskRunner API](https://openfl.readthedocs.io/en/latest/about/features_index/taskrunner.html#running-the-task-runner).

### Building a workspace image
OpenFL supports [Gramine-based](https://gramine.readthedocs.io/en/stable/) TEEs that run within SGX.

To build a TEE-ready workspace image, run the following command from an existing workspace directory. Ensure PKI setup and plan confirmations are done before this step.

```shell
# Optional, generate an enclave signing key (auto-generated otherwise)
user@vm:~/example_workspace$ openssl genrsa -out key.pem -3 3072
user@vm:~/example_workspace$ fx workspace dockerize --enclave-key ./key.pem --save
```
This command builds the base image and a TEE-ready workspace image. Refer to `fx workspace dockerize --help` for more details.

A signed docker image named `example_workspace.tar` will be saved in the workspace. This image (along with respective PKI certificates) can be shared across participating entities.

### Running without a TEE
Using native `fx` command within the image will run the experiment without TEEs.

```shell
# Aggregator
docker run --rm \
--network host \
--mount type=bind,source=./certs.tar,target=/certs.tar \
example_workspace bash -c "fx aggregator start ..."

# Collaborator(s)
docker run --rm \
--network host \
--mount type=bind,source=./certs.tar,target=/certs.tar \
example_workspace bash -c "fx collaborator start ..."
```

### Running within a TEE
To run `fx` within a TEE, mount SGX device and AESMD volumes. In addition, prefix the `fx` command with `gramine-sgx` directive.
```shell
# Aggregator
docker run --rm \
--network host \
--device=/dev/sgx_enclave \
-v /var/run/aesmd/aesm.socket:/var/run/aesmd/aesm.socket \
--mount type=bind,source=./certs.tar,target=/certs.tar \
example_workspace bash -c "gramine-sgx fx aggregator start ..."

# Collaborator(s)
docker run --rm \
--network host \
--device=/dev/sgx_enclave \
-v /var/run/aesmd/aesm.socket:/var/run/aesmd/aesm.socket \
--mount type=bind,source=./certs.tar,target=/certs.tar \
example_workspace bash -c "gramine-sgx fx collaborator start ..."
```
54 changes: 54 additions & 0 deletions openfl-docker/gramine_app/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# ------------------------------------
# Makefile for Gramine application within a container
# Usage:
#  1. Activate the python venv.
#  2. Provide paths VENV_ROOT and WORKSPACE_ROOT.
#  3. make SGX=0/1 [SGX_SIGNER_KEY=<path_to_sgx_signer_key>]
# ------------------------------------
# `?=` keeps every knob overridable from the command line.
# VENV_ROOT is derived from the active interpreter: two dirnames above
# `which python` (i.e. the venv prefix).
VENV_ROOT ?= $(shell dirname $(shell dirname $(shell which python)))
WORKSPACE_ROOT ?= $(shell pwd)
ARCH_LIBDIR ?= /lib/$(shell $(CC) -dumpmachine)
SGX_SIGNER_KEY ?= /key.pem

# If a recipe fails (e.g. gramine-manifest errors mid-write), delete the
# half-written target so a rerun rebuilds it instead of seeing it up to date.
.DELETE_ON_ERROR:

ifeq ($(DEBUG),1)
GRAMINE_LOG_LEVEL := debug
else
GRAMINE_LOG_LEVEL := error
endif

.PHONY: all
all: fx.manifest
ifeq ($(SGX),1)
all: fx.manifest.sgx fx.sig
endif

# Render the Gramine manifest from its template, substituting paths and
# the log level chosen above.
fx.manifest: fx.manifest.template
	@echo "Making fx.manifest file"
	gramine-manifest \
		-Dlog_level=$(GRAMINE_LOG_LEVEL) \
		-Darch_libdir=$(ARCH_LIBDIR) \
		-Dvenv_root=$(VENV_ROOT) \
		-Dentrypoint=$(VENV_ROOT)/bin/fx \
		-Dworkspace_root=$(WORKSPACE_ROOT) \
		$< >$@

# Sign the manifest for SGX. The last line of gramine-sgx-sign's output
# carries MR_ENCLAVE; reformat it as "fx.mr_enclave=<hash>" so callers can
# grep it (the Dockerfile redirects it into fx.mr_enclave).
fx.manifest.sgx: fx.manifest
	@echo "Making fx.manifest.sgx file"
	@test -s $(SGX_SIGNER_KEY) || \
		{ echo "SGX signer private key was not found, please specify SGX_SIGNER_KEY!"; exit 1; }
	@gramine-sgx-sign \
		--key $(SGX_SIGNER_KEY) \
		--manifest $< \
		--output $@ | tail -n 1 | tr -d ' ' | xargs -I {} echo "fx.mr_enclave={}"

# fx.sig is produced as a side effect of signing fx.manifest.sgx; the empty
# rule just ties it into the dependency graph.
fx.sig: fx.manifest.sgx

.PHONY: clean
clean:
	$(RM) *.manifest *.manifest.sgx *.token *.sig OUTPUT* *.PID TEST_STDOUT TEST_STDERR
	$(RM) -r scripts/__pycache__

.PHONY: distclean
distclean: clean
Loading

0 comments on commit 1d7c9bf

Please sign in to comment.