Dockerfile.Sgpu
# Use NVIDIA CUDA 12.1 base image
FROM nvidia/cuda:12.1.0-base-ubuntu22.04
# Install necessary packages (including curl)
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates \
curl \
gnupg2 \
software-properties-common
# Add NVIDIA repository and install NVIDIA Container Toolkit
RUN curl -fsSL https://nvidia.github.io/nvidia-docker/gpgkey | gpg --dearmor -o /usr/share/keyrings/nvidia-docker.gpg && \
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/nvidia-docker.gpg] https://nvidia.github.io/nvidia-docker/ $(. /etc/os-release; echo $ID$VERSION_ID) /" | tee /etc/apt/sources.list.d/nvidia-docker.list && \
apt-get update && \
apt-get install -y nvidia-docker2
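# Note: nvidia-docker2 configures a Docker host's runtime; installing it inside this image
# does not by itself expose a GPU. The GPU is attached at run time by the host
# (see the build/run sketch at the end of this file).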
# Set the working directory in the container
WORKDIR /app
# Install Python and pip
RUN apt-get update && apt-get install -y python3 python3-pip
# Copy the application source and requirements into the container at /app
COPY requirements.txt api_server.py modeling.py hug.py text_preprocessing.py user_speech_modeling.py ./
# Install any needed packages specified in requirements.txt
RUN pip3 install --no-cache-dir -r requirements.txt
# Install PyTorch 2.3.0 with CUDA 12.1 support
RUN pip3 install torch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/cu121
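# The cu121 wheel index matches the CUDA 12.1 base image above; installing these pins after
# requirements.txt means they override any other torch build that file may pull in.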
# Document that the API server listens on port 8000 (publish it with -p at run time)
EXPOSE 8000
# Start the FastAPI app in api_server.py with uvicorn when the container launches
CMD ["uvicorn", "api_server:app", "--host", "0.0.0.0", "--port", "8000"]