add docker support
Signed-off-by: Vladimir Mandic <[email protected]>
vladmandic committed Nov 13, 2024
1 parent 880f6f6 commit 8d50661
Showing 8 changed files with 71 additions and 6 deletions.
26 changes: 26 additions & 0 deletions .dockerignore
@@ -0,0 +1,26 @@
# defaults
.history
.vscode/
/__pycache__
/.ruff_cache
/cache
/cache.json
/config.json
/extensions/*
/html/extensions.json
/html/themes.json
/metadata.json
/node_modules
/outputs/*
/package-lock.json
/params.txt
/pnpm-lock.yaml
/styles.csv
/tmp
/ui-config.json
/user.css
/venv
/webui-user.bat
/webui-user.sh
/*.log.*
/*.log
1 change: 0 additions & 1 deletion .gitignore
@@ -74,4 +74,3 @@ dist/
!/models/VAE-approx/model.pt
!/models/Reference
!/models/Reference/**/*

2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -15,6 +15,7 @@ First, a massive update to docs including new UI top-level **info** tab with acc
- [MiaoshouAI PromptGen v2.0](https://huggingface.co/MiaoshouAI/Florence-2-base-PromptGen-v2.0) VQA captioning

**Workflow Improvements**:
- Native Docker support
- SD3x: ControlNets and all-in-one-safetensors
- XYZ grid: benchmarking, video creation, etc.
- Enhanced prompt parsing
@@ -74,6 +75,7 @@ And quite a few more improvements and fixes since the last update - for full det
- *note*: enable *bnb* on-the-fly quantization for even bigger gains

- Workflow improvements:
- Native Docker support with pre-defined [Dockerfile](https://github.com/vladmandic/automatic/blob/dev/Dockerfile)
- XYZ grid:
- optional time benchmark info to individual images
- optional add params to individual images
28 changes: 28 additions & 0 deletions Dockerfile
@@ -0,0 +1,28 @@
# SD.Next Dockerfile
FROM pytorch/pytorch:2.5.1-cuda12.4-cudnn9-runtime
# TBD add org info
LABEL org.opencontainers.image.authors="vladmandic"
WORKDIR /
COPY . .
# stop pip and uv from caching
ENV PIP_NO_CACHE_DIR=true
ENV UV_NO_CACHE=true
# disable model hashing for faster startup
ENV SD_NOHASHING=true
# set data directories
ENV SD_DATADIR="/mnt/data"
ENV SD_MODELSDIR="/mnt/models"
# install dependencies
RUN ["apt-get", "-y", "update"]
RUN ["apt-get", "-y", "install", "git"]
# sdnext will run all necessary pip install ops and then exit
RUN ["python", "launch.py", "--debug", "--uv", "--use-cuda", "--log", "sdnext.log", "--test"]
# preinstall additional packages to avoid installation during runtime
RUN ["uv", "pip", "install", "-r", "requirements-extra.txt"]
# actually run sdnext
CMD ["python", "launch.py", "--debug", "--skip-all", "--listen", "--quick", "--api-log", "--log", "sdnext.log"]
# expose port
EXPOSE 7860
# TBD add healthcheck function
HEALTHCHECK NONE
STOPSIGNAL SIGINT
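
A minimal usage sketch (not part of this commit) for the Dockerfile above: build the image and run it with the GPUs passed through, the exposed port published, and host directories mounted onto SD_DATADIR and SD_MODELSDIR. The image tag "sdnext" and the host-side paths are assumptions, not repository defaults, and `--gpus all` presumes the NVIDIA container toolkit is installed.

```python
# Sketch: drive docker build/run for the SD.Next image via subprocess.
import subprocess

# build the image from the repository root (the Dockerfile copies the whole repo)
subprocess.run(["docker", "build", "-t", "sdnext", "."], check=True)

# run it: pass GPUs through (the base image is CUDA 12.4), publish the exposed
# port, and mount host directories onto SD_DATADIR and SD_MODELSDIR
subprocess.run([
    "docker", "run", "--rm",
    "--gpus", "all",
    "-p", "7860:7860",                       # matches EXPOSE 7860
    "-v", "/srv/sdnext/data:/mnt/data",      # SD_DATADIR (host path is an assumption)
    "-v", "/srv/sdnext/models:/mnt/models",  # SD_MODELSDIR (host path is an assumption)
    "sdnext",
], check=True)
```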
2 changes: 1 addition & 1 deletion cli/api-txt2img.js
@@ -5,7 +5,7 @@
const fs = require('fs'); // eslint-disable-line no-undef
const process = require('process'); // eslint-disable-line no-undef

const sd_url = process.env.SDAPI_URL || 'http://127.0.0.1:7860';
const sd_url = process.env.SDAPI_URL || 'http://127.0.0.1:32769';
const sd_username = process.env.SDAPI_USR;
const sd_password = process.env.SDAPI_PWD;
const sd_options = {
12 changes: 10 additions & 2 deletions installer.py
@@ -459,6 +459,8 @@ def check_python(supported_minors=[9, 10, 11, 12], reason=None):

# check diffusers version
def check_diffusers():
if args.skip_all or args.skip_requirements:
return
sha = 'dac623b59f52c58383a39207d5147aa34e0047cd'
pkg = pkg_resources.working_set.by_key.get('diffusers', None)
minor = int(pkg.version.split('.')[1] if pkg is not None else 0)
@@ -474,13 +476,17 @@ def check_diffusers():

# check onnx version
def check_onnx():
if args.skip_all or args.skip_requirements:
return
if not installed('onnx', quiet=True):
install('onnx', 'onnx', ignore=True)
if not installed('onnxruntime', quiet=True) and not (installed('onnxruntime-gpu', quiet=True) or installed('onnxruntime-openvino', quiet=True) or installed('onnxruntime-training', quiet=True)): # allow either
install('onnxruntime', 'onnxruntime', ignore=True)


def check_torchao():
if args.skip_all or args.skip_requirements:
return
if installed('torchao', quiet=True):
ver = package_version('torchao')
if ver != '0.5.0':
@@ -492,14 +498,16 @@

def install_cuda():
log.info('CUDA: nVidia toolkit detected')
install('onnxruntime-gpu', 'onnxruntime-gpu', ignore=True, quiet=True)
if not (args.skip_all or args.skip_requirements):
install('onnxruntime-gpu', 'onnxruntime-gpu', ignore=True, quiet=True)
# return os.environ.get('TORCH_COMMAND', 'torch torchvision --index-url https://download.pytorch.org/whl/cu124')
return os.environ.get('TORCH_COMMAND', 'torch==2.5.1+cu124 torchvision==0.20.1+cu124 --index-url https://download.pytorch.org/whl/cu124')


def install_rocm_zluda():
if args.skip_all or args.skip_requirements:
return
from modules import rocm

if not rocm.is_installed:
log.warning('ROCm: could not find ROCm toolkit installed')
log.info('Using CPU-only torch')
4 changes: 2 additions & 2 deletions launch.py
@@ -204,13 +204,13 @@ def main():
installer.check_python()
if args.reset:
installer.git_reset()
if args.skip_git:
if args.skip_git or args.skip_all:
installer.log.info('Skipping GIT operations')
installer.check_version()
installer.log.info(f'Platform: {installer.print_dict(installer.get_platform())}')
installer.check_venv()
installer.log.info(f'Args: {sys.argv[1:]}')
if not args.skip_env:
if not args.skip_env or args.skip_all:
installer.set_environment()
if args.uv:
installer.install("uv", "uv")
2 changes: 2 additions & 0 deletions modules/shared.py
@@ -83,6 +83,8 @@
dir_timestamps = {}
dir_cache = {}
max_workers = 8
if os.environ.get("SD_HFCACHEDIR", None) is not None:
hfcache_dir = os.environ.get("SD_HFCACHEDIR")
if os.environ.get("HF_HUB_CACHE", None) is not None:
hfcache_dir = os.environ.get("HF_HUB_CACHE")
elif os.environ.get("HF_HUB", None) is not None:
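
The modules/shared.py hunk adds an SD_HFCACHEDIR override alongside the existing HF_HUB_CACHE/HF_HUB checks. A hedged sketch of how that override might be combined with the container from the Dockerfile; the cache path and the image tag are assumptions carried over from the earlier sketch.

```python
# Sketch: redirect the HuggingFace cache onto the mounted models volume.
import subprocess

subprocess.run([
    "docker", "run", "--rm",
    "-e", "SD_HFCACHEDIR=/mnt/models/huggingface",  # read by the new check in modules/shared.py
    "-v", "/srv/sdnext/models:/mnt/models",
    "-p", "7860:7860",
    "sdnext",                                       # image tag from the earlier sketch
], check=True)
```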
