forked from Fannovel16/comfyui_controlnet_aux
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: config.example.yaml
21 lines (21 loc) · 1.64 KB
/
config.example.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
---
# This is an example for the config.yaml file; rename it to config.yaml if you want to use it.
# ###############################################################################################
# You can also use absolute paths like:
#   "/root/ComfyUI/custom_nodes/comfyui_controlnet_aux/ckpts"
#   'D:\comfyui\custom_nodes\comfyui_controlnet_aux\ckpts'
# Single quotes keep backslashes literal — no escaping needed for Windows paths.
annotator_ckpts_path: 'C:\SDXL2\Data\Models\ControlNet'
# ###############################################################################################
# If you already have ckpts downloaded via huggingface_hub into the default cache path
# (e.g. ~/.cache/huggingface/hub), set this to true to use symlinks and save disk space.
USE_SYMLINKS: false
# ###############################################################################################
# EP_list is a list of execution providers for onnxruntime. If one of them is not available
# or not working well, delete that provider from here (config.yaml).
# All available providers: https://onnxruntime.ai/docs/execution-providers
# For example, if you have CUDA installed, you can set it to:
#   - "CUDAExecutionProvider"
#   - "CPUExecutionProvider"
# An empty list, or keeping only "CPUExecutionProvider", means cv2.dnn.readNetFromONNX is
# used to load onnx models instead.
# If your onnx models can only run on the CPU or have other issues, we recommend using the
# pt model instead.
EP_list:
  - "CUDAExecutionProvider"
  - "DirectMLExecutionProvider"
  - "OpenVINOExecutionProvider"
  - "ROCMExecutionProvider"
  - "CPUExecutionProvider"