diff --git a/docs/poetry.lock b/docs/poetry.lock
index 2249f818d02..b118d012b30 100644
--- a/docs/poetry.lock
+++ b/docs/poetry.lock
@@ -2,13 +2,13 @@
[[package]]
name = "alabaster"
-version = "0.7.16"
+version = "1.0.0"
description = "A light, configurable Sphinx theme"
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
files = [
- {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"},
- {file = "alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"},
+ {file = "alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b"},
+ {file = "alabaster-1.0.0.tar.gz", hash = "sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e"},
]
[[package]]
@@ -47,6 +47,24 @@ files = [
[package.extras]
dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
+[[package]]
+name = "beartype"
+version = "0.19.0"
+description = "Unbearably fast near-real-time hybrid runtime-static type-checking in pure Python."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "beartype-0.19.0-py3-none-any.whl", hash = "sha256:33b2694eda0daf052eb2aff623ed9a8a586703bbf0a90bbc475a83bbf427f699"},
+ {file = "beartype-0.19.0.tar.gz", hash = "sha256:de42dfc1ba5c3710fde6c3002e3bd2cad236ed4d2aabe876345ab0b4234a6573"},
+]
+
+[package.extras]
+dev = ["autoapi (>=0.9.0)", "coverage (>=5.5)", "equinox", "jax[cpu]", "jaxtyping", "mypy (>=0.800)", "numba", "numpy", "pandera", "pydata-sphinx-theme (<=0.7.2)", "pygments", "pyright (>=1.1.370)", "pytest (>=4.0.0)", "sphinx", "sphinx (>=4.2.0,<6.0.0)", "sphinxext-opengraph (>=0.7.5)", "tox (>=3.20.1)", "typing-extensions (>=3.10.0.0)"]
+doc-rtd = ["autoapi (>=0.9.0)", "pydata-sphinx-theme (<=0.7.2)", "sphinx (>=4.2.0,<6.0.0)", "sphinxext-opengraph (>=0.7.5)"]
+test = ["coverage (>=5.5)", "equinox", "jax[cpu]", "jaxtyping", "mypy (>=0.800)", "numba", "numpy", "pandera", "pygments", "pyright (>=1.1.370)", "pytest (>=4.0.0)", "sphinx", "tox (>=3.20.1)", "typing-extensions (>=3.10.0.0)"]
+test-tox = ["equinox", "jax[cpu]", "jaxtyping", "mypy (>=0.800)", "numba", "numpy", "pandera", "pygments", "pyright (>=1.1.370)", "pytest (>=4.0.0)", "sphinx", "typing-extensions (>=3.10.0.0)"]
+test-tox-coverage = ["coverage (>=5.5)"]
+
[[package]]
name = "beautifulsoup4"
version = "4.12.3"
@@ -422,22 +440,22 @@ files = [
[[package]]
name = "myst-parser"
-version = "3.0.1"
+version = "4.0.0"
description = "An extended [CommonMark](https://spec.commonmark.org/) compliant parser,"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.10"
files = [
- {file = "myst_parser-3.0.1-py3-none-any.whl", hash = "sha256:6457aaa33a5d474aca678b8ead9b3dc298e89c68e67012e73146ea6fd54babf1"},
- {file = "myst_parser-3.0.1.tar.gz", hash = "sha256:88f0cb406cb363b077d176b51c476f62d60604d68a8dcdf4832e080441301a87"},
+ {file = "myst_parser-4.0.0-py3-none-any.whl", hash = "sha256:b9317997552424448c6096c2558872fdb6f81d3ecb3a40ce84a7518798f3f28d"},
+ {file = "myst_parser-4.0.0.tar.gz", hash = "sha256:851c9dfb44e36e56d15d05e72f02b80da21a9e0d07cba96baf5e2d476bb91531"},
]
[package.dependencies]
-docutils = ">=0.18,<0.22"
+docutils = ">=0.19,<0.22"
jinja2 = "*"
markdown-it-py = ">=3.0,<4.0"
-mdit-py-plugins = ">=0.4,<1.0"
+mdit-py-plugins = ">=0.4.1,<1.0"
pyyaml = "*"
-sphinx = ">=6,<8"
+sphinx = ">=7,<9"
[package.extras]
code-style = ["pre-commit (>=3.0,<4.0)"]
@@ -448,13 +466,13 @@ testing-docutils = ["pygments", "pytest (>=8,<9)", "pytest-param-files (>=0.6.0,
[[package]]
name = "packaging"
-version = "24.1"
+version = "24.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
- {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
+ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
+ {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
]
[[package]]
@@ -574,13 +592,13 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "rich"
-version = "13.9.3"
+version = "13.9.4"
description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
optional = false
python-versions = ">=3.8.0"
files = [
- {file = "rich-13.9.3-py3-none-any.whl", hash = "sha256:9836f5096eb2172c9e77df411c1b009bace4193d6a481d534fea75ebba758283"},
- {file = "rich-13.9.3.tar.gz", hash = "sha256:bc1e01b899537598cf02579d2b9f4a415104d3fc439313a7a2c165d76557a08e"},
+ {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"},
+ {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"},
]
[package.dependencies]
@@ -593,23 +611,23 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"]
[[package]]
name = "setuptools"
-version = "75.2.0"
+version = "75.4.0"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "setuptools-75.2.0-py3-none-any.whl", hash = "sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8"},
- {file = "setuptools-75.2.0.tar.gz", hash = "sha256:753bb6ebf1f465a1912e19ed1d41f403a79173a9acf66a42e7e6aec45c3c16ec"},
+ {file = "setuptools-75.4.0-py3-none-any.whl", hash = "sha256:b3c5d862f98500b06ffdf7cc4499b48c46c317d8d56cb30b5c8bce4d88f5c216"},
+ {file = "setuptools-75.4.0.tar.gz", hash = "sha256:1dc484f5cf56fd3fe7216d7b8df820802e7246cfb534a1db2aa64f14fcb9cdcb"},
]
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"]
-core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.7.0)"]
+core = ["importlib-metadata (>=6)", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
enabler = ["pytest-enabler (>=2.2)"]
-test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
-type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"]
+test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
+type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (>=1.12,<1.14)", "pytest-mypy"]
[[package]]
name = "shellingham"
@@ -657,17 +675,17 @@ files = [
[[package]]
name = "sphinx"
-version = "7.4.7"
+version = "8.1.3"
description = "Python documentation generator"
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
files = [
- {file = "sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239"},
- {file = "sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe"},
+ {file = "sphinx-8.1.3-py3-none-any.whl", hash = "sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2"},
+ {file = "sphinx-8.1.3.tar.gz", hash = "sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927"},
]
[package.dependencies]
-alabaster = ">=0.7.14,<0.8.0"
+alabaster = ">=0.7.14"
babel = ">=2.13"
colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\""}
docutils = ">=0.20,<0.22"
@@ -677,17 +695,17 @@ packaging = ">=23.0"
Pygments = ">=2.17"
requests = ">=2.30.0"
snowballstemmer = ">=2.2"
-sphinxcontrib-applehelp = "*"
-sphinxcontrib-devhelp = "*"
-sphinxcontrib-htmlhelp = ">=2.0.0"
-sphinxcontrib-jsmath = "*"
-sphinxcontrib-qthelp = "*"
+sphinxcontrib-applehelp = ">=1.0.7"
+sphinxcontrib-devhelp = ">=1.0.6"
+sphinxcontrib-htmlhelp = ">=2.0.6"
+sphinxcontrib-jsmath = ">=1.0.1"
+sphinxcontrib-qthelp = ">=1.0.6"
sphinxcontrib-serializinghtml = ">=1.1.9"
tomli = {version = ">=2", markers = "python_version < \"3.11\""}
[package.extras]
docs = ["sphinxcontrib-websupport"]
-lint = ["flake8 (>=6.0)", "importlib-metadata (>=6.0)", "mypy (==1.10.1)", "pytest (>=6.0)", "ruff (==0.5.2)", "sphinx-lint (>=0.9)", "tomli (>=2)", "types-docutils (==0.21.0.20240711)", "types-requests (>=2.30.0)"]
+lint = ["flake8 (>=6.0)", "mypy (==1.11.1)", "pyright (==1.1.384)", "pytest (>=6.0)", "ruff (==0.6.9)", "sphinx-lint (>=0.9)", "tomli (>=2)", "types-Pillow (==10.2.0.20240822)", "types-Pygments (==2.18.0.20240506)", "types-colorama (==0.4.15.20240311)", "types-defusedxml (==0.7.0.20240218)", "types-docutils (==0.21.0.20241005)", "types-requests (==2.32.0.20240914)", "types-urllib3 (==1.26.25.14)"]
test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=8.0)", "setuptools (>=70.0)", "typing_extensions (>=4.9)"]
[[package]]
@@ -750,26 +768,28 @@ rtd = ["ipython", "myst-nb", "sphinx", "sphinx-book-theme", "sphinx-examples"]
[[package]]
name = "sphinx-design"
-version = "0.5.0"
+version = "0.6.1"
description = "A sphinx extension for designing beautiful, view size responsive web components."
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "sphinx_design-0.5.0-py3-none-any.whl", hash = "sha256:1af1267b4cea2eedd6724614f19dcc88fe2e15aff65d06b2f6252cee9c4f4c1e"},
- {file = "sphinx_design-0.5.0.tar.gz", hash = "sha256:e8e513acea6f92d15c6de3b34e954458f245b8e761b45b63950f65373352ab00"},
+ {file = "sphinx_design-0.6.1-py3-none-any.whl", hash = "sha256:b11f37db1a802a183d61b159d9a202314d4d2fe29c163437001324fe2f19549c"},
+ {file = "sphinx_design-0.6.1.tar.gz", hash = "sha256:b44eea3719386d04d765c1a8257caca2b3e6f8421d7b3a5e742c0fd45f84e632"},
]
[package.dependencies]
-sphinx = ">=5,<8"
+sphinx = ">=6,<9"
[package.extras]
code-style = ["pre-commit (>=3,<4)"]
-rtd = ["myst-parser (>=1,<3)"]
-testing = ["myst-parser (>=1,<3)", "pytest (>=7.1,<8.0)", "pytest-cov", "pytest-regressions"]
-theme-furo = ["furo (>=2023.7.0,<2023.8.0)"]
-theme-pydata = ["pydata-sphinx-theme (>=0.13.0,<0.14.0)"]
-theme-rtd = ["sphinx-rtd-theme (>=1.0,<2.0)"]
-theme-sbt = ["sphinx-book-theme (>=1.0,<2.0)"]
+rtd = ["myst-parser (>=2,<4)"]
+testing = ["defusedxml", "myst-parser (>=2,<4)", "pytest (>=8.3,<9.0)", "pytest-cov", "pytest-regressions"]
+testing-no-myst = ["defusedxml", "pytest (>=8.3,<9.0)", "pytest-cov", "pytest-regressions"]
+theme-furo = ["furo (>=2024.7.18,<2024.8.0)"]
+theme-im = ["sphinx-immaterial (>=0.12.2,<0.13.0)"]
+theme-pydata = ["pydata-sphinx-theme (>=0.15.2,<0.16.0)"]
+theme-rtd = ["sphinx-rtd-theme (>=2.0,<3.0)"]
+theme-sbt = ["sphinx-book-theme (>=1.1,<2.0)"]
[[package]]
name = "sphinx-multiversion-scylla"
@@ -820,7 +840,7 @@ setuptools = ">=70.1.1,<76.0.0"
sphinx-collapse = ">=0.1.1,<0.2.0"
sphinx-copybutton = ">=0.5.2,<0.6.0"
sphinx-notfound-page = ">=1.0.4,<2.0.0"
-Sphinx-Substitution-Extensions = ">=2022.2.16,<2023.0.0"
+Sphinx-Substitution-Extensions = ">=2022.2.16"
sphinx-tabs = ">=3.4.5,<4.0.0"
[[package]]
@@ -842,13 +862,13 @@ dev = ["build", "flake8", "pre-commit", "pytest", "sphinx", "tox"]
[[package]]
name = "sphinx-substitution-extensions"
-version = "2022.2.16"
+version = "2024.10.17"
description = "Extensions for Sphinx which allow for substitutions."
optional = false
python-versions = "*"
files = [
- {file = "Sphinx Substitution Extensions-2022.2.16.tar.gz", hash = "sha256:ff7d05bd00e8b2d7eb8a403b9f317d70411d4e9b6812bf91534a50df22190c75"},
- {file = "Sphinx_Substitution_Extensions-2022.2.16-py3-none-any.whl", hash = "sha256:5a8ca34dac3984486344e95c36e3ed4766d402a71bdee7390d600f153db9795b"},
+ {file = "Sphinx Substitution Extensions-2024.10.17.tar.gz", hash = "sha256:30fa723bb44afe23adc5187601dcf0c1955dcd1b26f588773b395e852076179d"},
+ {file = "Sphinx_Substitution_Extensions-2024.10.17-py3-none-any.whl", hash = "sha256:6c1aae08edea616c5602f30368e6c15205220aa2d0b37bc47569322bce5e1271"},
]
[package.dependencies]
@@ -992,24 +1012,24 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7
[[package]]
name = "tomli"
-version = "2.0.2"
+version = "2.1.0"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
- {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"},
- {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"},
+ {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"},
+ {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"},
]
[[package]]
name = "typer"
-version = "0.12.5"
+version = "0.13.0"
description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
optional = false
python-versions = ">=3.7"
files = [
- {file = "typer-0.12.5-py3-none-any.whl", hash = "sha256:62fe4e471711b147e3365034133904df3e235698399bc4de2b36c8579298d52b"},
- {file = "typer-0.12.5.tar.gz", hash = "sha256:f592f089bedcc8ec1b974125d64851029c3b1af145f04aca64d69410f0c9b722"},
+ {file = "typer-0.13.0-py3-none-any.whl", hash = "sha256:d85fe0b777b2517cc99c8055ed735452f2659cd45e451507c76f48ce5c1d00e2"},
+ {file = "typer-0.13.0.tar.gz", hash = "sha256:f1c7198347939361eec90139ffa0fd8b3df3a2259d5852a0f7400e476d95985c"},
]
[package.dependencies]
@@ -1162,100 +1182,83 @@ anyio = ">=3.0.0"
[[package]]
name = "websockets"
-version = "13.1"
+version = "14.1"
description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"},
- {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"},
- {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"},
- {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"},
- {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"},
- {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"},
- {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"},
- {file = "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"},
- {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"},
- {file = "websockets-13.1-cp310-cp310-win32.whl", hash = "sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"},
- {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"},
- {file = "websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"},
- {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"},
- {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"},
- {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"},
- {file = "websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"},
- {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"},
- {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"},
- {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"},
- {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"},
- {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"},
- {file = "websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"},
- {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"},
- {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"},
- {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"},
- {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"},
- {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"},
- {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"},
- {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"},
- {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"},
- {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"},
- {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"},
- {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"},
- {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"},
- {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"},
- {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"},
- {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"},
- {file = "websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"},
- {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"},
- {file = "websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"},
- {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"},
- {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"},
- {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"},
- {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"},
- {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"},
- {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"},
- {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"},
- {file = "websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"},
- {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"},
- {file = "websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"},
- {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"},
- {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"},
- {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"},
- {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"},
- {file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"},
- {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"},
- {file = "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"},
- {file = "websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"},
- {file = "websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"},
- {file = "websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"},
- {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"},
- {file = "websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"},
- {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"},
- {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"},
- {file = "websockets-13.1-cp39-cp39-win32.whl", hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"},
- {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"},
- {file = "websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"},
- {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"},
- {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"},
- {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"},
- {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"},
- {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"},
- {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"},
- {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"},
- {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"},
- {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"},
- {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"},
- {file = "websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"},
- {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"},
- {file = "websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"},
- {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"},
- {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"},
- {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"},
- {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"},
- {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"},
- {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"},
+ {file = "websockets-14.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a0adf84bc2e7c86e8a202537b4fd50e6f7f0e4a6b6bf64d7ccb96c4cd3330b29"},
+ {file = "websockets-14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90b5d9dfbb6d07a84ed3e696012610b6da074d97453bd01e0e30744b472c8179"},
+ {file = "websockets-14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2177ee3901075167f01c5e335a6685e71b162a54a89a56001f1c3e9e3d2ad250"},
+ {file = "websockets-14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f14a96a0034a27f9d47fd9788913924c89612225878f8078bb9d55f859272b0"},
+ {file = "websockets-14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f874ba705deea77bcf64a9da42c1f5fc2466d8f14daf410bc7d4ceae0a9fcb0"},
+ {file = "websockets-14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9607b9a442392e690a57909c362811184ea429585a71061cd5d3c2b98065c199"},
+ {file = "websockets-14.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bea45f19b7ca000380fbd4e02552be86343080120d074b87f25593ce1700ad58"},
+ {file = "websockets-14.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:219c8187b3ceeadbf2afcf0f25a4918d02da7b944d703b97d12fb01510869078"},
+ {file = "websockets-14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ad2ab2547761d79926effe63de21479dfaf29834c50f98c4bf5b5480b5838434"},
+ {file = "websockets-14.1-cp310-cp310-win32.whl", hash = "sha256:1288369a6a84e81b90da5dbed48610cd7e5d60af62df9851ed1d1d23a9069f10"},
+ {file = "websockets-14.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0744623852f1497d825a49a99bfbec9bea4f3f946df6eb9d8a2f0c37a2fec2e"},
+ {file = "websockets-14.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:449d77d636f8d9c17952628cc7e3b8faf6e92a17ec581ec0c0256300717e1512"},
+ {file = "websockets-14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a35f704be14768cea9790d921c2c1cc4fc52700410b1c10948511039be824aac"},
+ {file = "websockets-14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b1f3628a0510bd58968c0f60447e7a692933589b791a6b572fcef374053ca280"},
+ {file = "websockets-14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c3deac3748ec73ef24fc7be0b68220d14d47d6647d2f85b2771cb35ea847aa1"},
+ {file = "websockets-14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7048eb4415d46368ef29d32133134c513f507fff7d953c18c91104738a68c3b3"},
+ {file = "websockets-14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6cf0ad281c979306a6a34242b371e90e891bce504509fb6bb5246bbbf31e7b6"},
+ {file = "websockets-14.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cc1fc87428c1d18b643479caa7b15db7d544652e5bf610513d4a3478dbe823d0"},
+ {file = "websockets-14.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f95ba34d71e2fa0c5d225bde3b3bdb152e957150100e75c86bc7f3964c450d89"},
+ {file = "websockets-14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9481a6de29105d73cf4515f2bef8eb71e17ac184c19d0b9918a3701c6c9c4f23"},
+ {file = "websockets-14.1-cp311-cp311-win32.whl", hash = "sha256:368a05465f49c5949e27afd6fbe0a77ce53082185bbb2ac096a3a8afaf4de52e"},
+ {file = "websockets-14.1-cp311-cp311-win_amd64.whl", hash = "sha256:6d24fc337fc055c9e83414c94e1ee0dee902a486d19d2a7f0929e49d7d604b09"},
+ {file = "websockets-14.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ed907449fe5e021933e46a3e65d651f641975a768d0649fee59f10c2985529ed"},
+ {file = "websockets-14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:87e31011b5c14a33b29f17eb48932e63e1dcd3fa31d72209848652310d3d1f0d"},
+ {file = "websockets-14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bc6ccf7d54c02ae47a48ddf9414c54d48af9c01076a2e1023e3b486b6e72c707"},
+ {file = "websockets-14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9777564c0a72a1d457f0848977a1cbe15cfa75fa2f67ce267441e465717dcf1a"},
+ {file = "websockets-14.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a655bde548ca98f55b43711b0ceefd2a88a71af6350b0c168aa77562104f3f45"},
+ {file = "websockets-14.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3dfff83ca578cada2d19e665e9c8368e1598d4e787422a460ec70e531dbdd58"},
+ {file = "websockets-14.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6a6c9bcf7cdc0fd41cc7b7944447982e8acfd9f0d560ea6d6845428ed0562058"},
+ {file = "websockets-14.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4b6caec8576e760f2c7dd878ba817653144d5f369200b6ddf9771d64385b84d4"},
+ {file = "websockets-14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eb6d38971c800ff02e4a6afd791bbe3b923a9a57ca9aeab7314c21c84bf9ff05"},
+ {file = "websockets-14.1-cp312-cp312-win32.whl", hash = "sha256:1d045cbe1358d76b24d5e20e7b1878efe578d9897a25c24e6006eef788c0fdf0"},
+ {file = "websockets-14.1-cp312-cp312-win_amd64.whl", hash = "sha256:90f4c7a069c733d95c308380aae314f2cb45bd8a904fb03eb36d1a4983a4993f"},
+ {file = "websockets-14.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:3630b670d5057cd9e08b9c4dab6493670e8e762a24c2c94ef312783870736ab9"},
+ {file = "websockets-14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:36ebd71db3b89e1f7b1a5deaa341a654852c3518ea7a8ddfdf69cc66acc2db1b"},
+ {file = "websockets-14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5b918d288958dc3fa1c5a0b9aa3256cb2b2b84c54407f4813c45d52267600cd3"},
+ {file = "websockets-14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00fe5da3f037041da1ee0cf8e308374e236883f9842c7c465aa65098b1c9af59"},
+ {file = "websockets-14.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8149a0f5a72ca36720981418eeffeb5c2729ea55fa179091c81a0910a114a5d2"},
+ {file = "websockets-14.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77569d19a13015e840b81550922056acabc25e3f52782625bc6843cfa034e1da"},
+ {file = "websockets-14.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cf5201a04550136ef870aa60ad3d29d2a59e452a7f96b94193bee6d73b8ad9a9"},
+ {file = "websockets-14.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:88cf9163ef674b5be5736a584c999e98daf3aabac6e536e43286eb74c126b9c7"},
+ {file = "websockets-14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:836bef7ae338a072e9d1863502026f01b14027250a4545672673057997d5c05a"},
+ {file = "websockets-14.1-cp313-cp313-win32.whl", hash = "sha256:0d4290d559d68288da9f444089fd82490c8d2744309113fc26e2da6e48b65da6"},
+ {file = "websockets-14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8621a07991add373c3c5c2cf89e1d277e49dc82ed72c75e3afc74bd0acc446f0"},
+ {file = "websockets-14.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:01bb2d4f0a6d04538d3c5dfd27c0643269656c28045a53439cbf1c004f90897a"},
+ {file = "websockets-14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:414ffe86f4d6f434a8c3b7913655a1a5383b617f9bf38720e7c0799fac3ab1c6"},
+ {file = "websockets-14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8fda642151d5affdee8a430bd85496f2e2517be3a2b9d2484d633d5712b15c56"},
+ {file = "websockets-14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd7c11968bc3860d5c78577f0dbc535257ccec41750675d58d8dc66aa47fe52c"},
+ {file = "websockets-14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a032855dc7db987dff813583d04f4950d14326665d7e714d584560b140ae6b8b"},
+ {file = "websockets-14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7e7ea2f782408c32d86b87a0d2c1fd8871b0399dd762364c731d86c86069a78"},
+ {file = "websockets-14.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:39450e6215f7d9f6f7bc2a6da21d79374729f5d052333da4d5825af8a97e6735"},
+ {file = "websockets-14.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ceada5be22fa5a5a4cdeec74e761c2ee7db287208f54c718f2df4b7e200b8d4a"},
+ {file = "websockets-14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3fc753451d471cff90b8f467a1fc0ae64031cf2d81b7b34e1811b7e2691bc4bc"},
+ {file = "websockets-14.1-cp39-cp39-win32.whl", hash = "sha256:14839f54786987ccd9d03ed7f334baec0f02272e7ec4f6e9d427ff584aeea8b4"},
+ {file = "websockets-14.1-cp39-cp39-win_amd64.whl", hash = "sha256:d9fd19ecc3a4d5ae82ddbfb30962cf6d874ff943e56e0c81f5169be2fda62979"},
+ {file = "websockets-14.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e5dc25a9dbd1a7f61eca4b7cb04e74ae4b963d658f9e4f9aad9cd00b688692c8"},
+ {file = "websockets-14.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:04a97aca96ca2acedf0d1f332c861c5a4486fdcba7bcef35873820f940c4231e"},
+ {file = "websockets-14.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df174ece723b228d3e8734a6f2a6febbd413ddec39b3dc592f5a4aa0aff28098"},
+ {file = "websockets-14.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:034feb9f4286476f273b9a245fb15f02c34d9586a5bc936aff108c3ba1b21beb"},
+ {file = "websockets-14.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:660c308dabd2b380807ab64b62985eaccf923a78ebc572bd485375b9ca2b7dc7"},
+ {file = "websockets-14.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5a42d3ecbb2db5080fc578314439b1d79eef71d323dc661aa616fb492436af5d"},
+ {file = "websockets-14.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ddaa4a390af911da6f680be8be4ff5aaf31c4c834c1a9147bc21cbcbca2d4370"},
+ {file = "websockets-14.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a4c805c6034206143fbabd2d259ec5e757f8b29d0a2f0bf3d2fe5d1f60147a4a"},
+ {file = "websockets-14.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:205f672a6c2c671a86d33f6d47c9b35781a998728d2c7c2a3e1cf3333fcb62b7"},
+ {file = "websockets-14.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef440054124728cc49b01c33469de06755e5a7a4e83ef61934ad95fc327fbb0"},
+ {file = "websockets-14.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7591d6f440af7f73c4bd9404f3772bfee064e639d2b6cc8c94076e71b2471c1"},
+ {file = "websockets-14.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:25225cc79cfebc95ba1d24cd3ab86aaa35bcd315d12fa4358939bd55e9bd74a5"},
+ {file = "websockets-14.1-py3-none-any.whl", hash = "sha256:4d4fc827a20abe6d544a119896f6b78ee13fe81cbfef416f3f2ddf09a03f0e2e"},
+ {file = "websockets-14.1.tar.gz", hash = "sha256:398b10c77d471c0aab20a845e7a60076b6390bfdaac7a6d2edb0d2c59d75e8d8"},
]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
-content-hash = "cb08c82fb199b201cf49b83848c7281beac4b74daa314caef14d2ef812b20fcd"
+content-hash = "77d8c573bcf2670232ba1cb2b0df6c20463396ea9e26c017a9863594872f2734"
diff --git a/docs/pyproject.toml b/docs/pyproject.toml
index 62854f3e9c4..7ab9f8848e4 100644
--- a/docs/pyproject.toml
+++ b/docs/pyproject.toml
@@ -10,13 +10,15 @@ package-mode = false
python = "^3.10"
pygments = "^2.18.0"
sphinx-scylladb-theme = "^1.8.1"
+#sphinx-substitution-extensions = "=2024.10.17"
sphinx-sitemap = "^2.6.0"
+beartype = ">0.0.0"
sphinx-autobuild = "^2024.4.19"
-Sphinx = "^7.3.7"
+Sphinx = "^8.1.3"
sphinx-multiversion-scylla = "^0.3.1"
redirects_cli ="^0.1.3"
-myst-parser = "^3.0.1"
-sphinx-design = "^0.5.0"
+myst-parser = "^4.0.0"
+sphinx-design = "^0.6.1"
[build-system]
requires = ["poetry>=1.8.0"]
diff --git a/docs/source/.internal/helm-crd-warning.md b/docs/source/.internal/helm-crd-warning.md
new file mode 100644
index 00000000000..e22af94231a
--- /dev/null
+++ b/docs/source/.internal/helm-crd-warning.md
@@ -0,0 +1,5 @@
+:::{warning}
+Helm doesn't support managing CustomResourceDefinition resources ([#5871](https://github.com/helm/helm/issues/5871), [#7735](https://github.com/helm/helm/issues/7735)).
+Helm only creates CRDs on the first install and never updates them, while keeping the CRDs up to date (with every update) is absolutely essential.
+Because of that, users have to update the CRDs manually on every update.
+:::
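+
+For example (a sketch, not an official procedure; the path below assumes you have the chart checked out locally with its CRD manifests under a `crds/` directory), a manual update could look like this:
+
+:::{code-block} bash
+# Re-apply the CRD manifests bundled with the chart before upgrading the release.
+# Adjust the path to wherever the chart's crds/ directory lives in your checkout.
+kubectl apply --server-side -f=./helm/scylla-operator/crds/
+:::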
diff --git a/docs/source/.internal/manager-license-note.md b/docs/source/.internal/manager-license-note.md
new file mode 100644
index 00000000000..caa85bd1f7d
--- /dev/null
+++ b/docs/source/.internal/manager-license-note.md
@@ -0,0 +1,5 @@
+:::{note}
+ScyllaDB Manager is available for ScyllaDB Enterprise customers and ScyllaDB Open Source users.
+With ScyllaDB Open Source, ScyllaDB Manager is limited to 5 nodes.
+See the ScyllaDB Manager [Proprietary Software License Agreement](https://www.scylladb.com/scylla-manager-software-license-agreement/) for details.
+:::
diff --git a/docs/source/.internal/tuning-warning.md b/docs/source/.internal/tuning-warning.md
new file mode 100644
index 00000000000..4401370bc05
--- /dev/null
+++ b/docs/source/.internal/tuning-warning.md
@@ -0,0 +1,4 @@
+:::{warning}
+We recommend that you first try out the performance tuning on a pre-production instance.
+Given the nature of the underlying tuning script, undoing the changes requires rebooting the Kubernetes node(s).
+:::
diff --git a/docs/source/architecture/components-cluster_scoped.svg b/docs/source/architecture/components-cluster_scoped.svg
new file mode 100644
index 00000000000..60b505fd139
--- /dev/null
+++ b/docs/source/architecture/components-cluster_scoped.svg
@@ -0,0 +1,424 @@
+
+
+
\ No newline at end of file
diff --git a/docs/source/architecture/components-namespaced.svg b/docs/source/architecture/components-namespaced.svg
new file mode 100644
index 00000000000..a53ee2194ed
--- /dev/null
+++ b/docs/source/architecture/components-namespaced.svg
@@ -0,0 +1,248 @@
+
+
+
\ No newline at end of file
diff --git a/docs/source/architecture/components.odg b/docs/source/architecture/components.odg
new file mode 100644
index 00000000000..4fc13199bcf
Binary files /dev/null and b/docs/source/architecture/components.odg differ
diff --git a/docs/source/architecture/deploy.odg b/docs/source/architecture/deploy.odg
new file mode 100644
index 00000000000..811c14ab324
Binary files /dev/null and b/docs/source/architecture/deploy.odg differ
diff --git a/docs/source/architecture/index.md b/docs/source/architecture/index.md
new file mode 100644
index 00000000000..0a55ce8f076
--- /dev/null
+++ b/docs/source/architecture/index.md
@@ -0,0 +1,10 @@
+# Architecture
+
+:::{toctree}
+:maxdepth: 1
+
+overview
+storage/index
+tuning
+manager
+:::
diff --git a/docs/source/architecture/manager.md b/docs/source/architecture/manager.md
new file mode 100644
index 00000000000..7584d3dc4ff
--- /dev/null
+++ b/docs/source/architecture/manager.md
@@ -0,0 +1,25 @@
+# ScyllaDB Manager
+
+{{productName}} has a basic integration with ScyllaDB Manager.
+At this point, there is one global ScyllaDB Manager instance that manages all [ScyllaClusters](../resources/scyllaclusters/overview.md), and a corresponding controller automatically configures ScyllaDB Manager to monitor the ScyllaDB instances and to sync repair and backup tasks based on the ScyllaCluster definition.
+Unfortunately, the rest of the functionality is not yet exposed through the ScyllaCluster APIs, so operations like restoring a cluster from a backup have to be performed by an administrator who execs into the shared ScyllaDB Manager deployment and uses `sctool` directly.
+
+:::{caution}
+Because the ScyllaDB Manager instance is shared by all users and their ScyllaClusters, only administrators should have the privileges to access the `scylla-manager` namespace.
+:::
+
+
+:::{include} ../.internal/manager-license-note.md
+:::
+
+## Accessing ScyllaDB Manager
+
+For the operations that are not yet supported on ScyllaClusters, you can access ScyllaDB Manager manually.
+
+First, you need to find the ScyllaDB Manager ID for your cluster:
+
+:::{code-block} bash
+kubectl -n=test get scyllacluster/scylla --template='{{ .status.managerId }}'
+:::
+
+:::{note}
+Note that some of the operations use the *ScyllaDB Manager Agent* that runs within the ScyllaCluster and has to have access to, e.g., the buckets being used.
+:::
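+
+For operations that go through `sctool`, you can exec into the shared ScyllaDB Manager deployment (a sketch; it assumes the default `scylla-manager` deployment name in the `scylla-manager` namespace):
+
+:::{code-block} bash
+# List the clusters registered with ScyllaDB Manager using sctool.
+kubectl -n=scylla-manager exec -it deployment.apps/scylla-manager -- sctool cluster list
+:::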
diff --git a/docs/source/architecture/overview.md b/docs/source/architecture/overview.md
new file mode 100644
index 00000000000..a3af933c4ec
--- /dev/null
+++ b/docs/source/architecture/overview.md
@@ -0,0 +1,31 @@
+# Overview
+
+## Foreword
+
+{{productName}} is a set of controllers and API extensions that need to be installed in your cluster.
+The Kubernetes API is extended using [CustomResourceDefinitions (CRDs)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) and [dynamic admission webhooks](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) to provide new resources ([API reference](/api-reference/index)).
+These resources are reconciled by controllers embedded within the {{productName}} deployment.
+
+ScyllaDB is a stateful application and {{productName}} requires you to have a storage provisioner installed in your cluster.
+To achieve the best performance, we recommend using a storage provisioner based on local NVMe disks.
+You can learn more about different setups in [a dedicated storage section](./storage/overview.md).
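+
+Once {{productName}} is installed, you can quickly check that these API extensions are present (a sketch, purely for verification):
+
+:::{code-block} bash
+# List the API resources served under the scylla.scylladb.com group.
+kubectl api-resources --api-group=scylla.scylladb.com
+:::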
+
+## Components
+
+A {{productName}} deployment consists of several components that need to be installed or otherwise present in your Kubernetes cluster.
+By design, some of the components need elevated permissions, but they are only accessible to administrators.
+
+
+### Cluster scoped
+```{image} ./components-cluster_scoped.svg
+:name: components-cluster-scoped
+:align: center
+:scale: 75%
+```
+
+### Namespaced
+```{image} ./components-namespaced.svg
+:name: components-namespaced
+:align: center
+:scale: 75%
+```
diff --git a/docs/source/architecture/storage/index.md b/docs/source/architecture/storage/index.md
new file mode 100644
index 00000000000..f7042662e7f
--- /dev/null
+++ b/docs/source/architecture/storage/index.md
@@ -0,0 +1,8 @@
+# Storage
+
+:::{toctree}
+:maxdepth: 1
+
+overview
+local-csi-driver
+:::
diff --git a/docs/source/architecture/storage/local-csi-driver.md b/docs/source/architecture/storage/local-csi-driver.md
new file mode 100644
index 00000000000..9928205896d
--- /dev/null
+++ b/docs/source/architecture/storage/local-csi-driver.md
@@ -0,0 +1,14 @@
+# Local CSI Driver
+
+## About
+
+The ScyllaDB Local CSI Driver implements the [Container Storage Interface (CSI)](https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/), a specification for container orchestrators to manage the lifecycle of volumes.
+
+It supports dynamic provisioning on local disks, so storage volumes can be created on-demand through managed directories on the local disk.
+
+## CSI Features
+
+Local CSI Driver implements the following CSI features:
+- Controller Service
+- Node Service
+- Identity Service
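+
+Once deployed, you can verify that the driver is registered with the cluster (a sketch; the exact driver name reported depends on your deployment):
+
+:::{code-block} bash
+# List the registered CSI drivers; the ScyllaDB Local CSI Driver should be among them.
+kubectl get csidrivers.storage.k8s.io
+:::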
diff --git a/docs/source/architecture/storage/overview.md b/docs/source/architecture/storage/overview.md
new file mode 100644
index 00000000000..13beb8ad1c9
--- /dev/null
+++ b/docs/source/architecture/storage/overview.md
@@ -0,0 +1,23 @@
+# Overview
+
+ScyllaDB works with both local and network-attached storage provisioners, but our primary focus is on local storage, which provides the best performance.
+We support two local provisioners: [ScyllaDB Local CSI Driver](https://github.com/scylladb/local-csi-driver) and [Kubernetes SIG Storage Local Persistence Volume Static Provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner).
+
+## Setting up local disks
+
+When a Kubernetes node with local disk(s) is created, the storage is usually uninitialized.
+This heavily depends on your platform and its options, but even when nodes are provisioned with mounted disks, the platforms usually don't set up *RAID*, nor offer a choice of file system type. (ScyllaDB needs the storage to be formatted with `xfs`.)
+
+Setting up RAID arrays, formatting the file system, or mounting it in a declarative manner is challenging, and that's one of the reasons we have created the [NodeConfig](../../resources/nodeconfigs.md) custom resource.
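+
+As a rough illustration of the declarative approach, a NodeConfig could assemble the local disks into a RAID array, format it with `xfs`, and mount it. The manifest below is only a sketch - the field names and values are assumptions and should be verified against the [NodeConfig](../../resources/nodeconfigs.md) reference for your version:
+
+:::{code-block} bash
+# Illustrative only: verify the exact NodeConfig schema before applying.
+kubectl apply --server-side -f=- <<'EOF'
+apiVersion: scylla.scylladb.com/v1alpha1
+kind: NodeConfig
+metadata:
+  name: cluster
+spec:
+  placement:
+    nodeSelector:
+      scylla.scylladb.com/node-type: scylla
+  localDiskSetup:
+    raids:
+    - name: nvmes
+      type: RAID0
+      RAID0:
+        devices:
+          nameRegex: ^/dev/nvme\d+n\d+$
+    filesystems:
+    - device: /dev/md/nvmes
+      type: xfs
+    mounts:
+    - device: /dev/md/nvmes
+      mountPoint: /var/lib/persistent-volumes
+EOF
+:::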
+
+## Supported local provisioners
+
+### ScyllaDB Local CSI driver
+
+ScyllaDB Local CSI Driver supports dynamic provisioning on local disks and sharing the storage capacity.
+It is based on dynamic directories and `xfs` `prjquota`.
+It allows [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) to be created dynamically for a corresponding [PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim) by automatically provisioning directories created on disks attached to instances. On supported filesystems, directories have quota limitations to ensure volume size limits.
+
+At this point, the Local CSI Driver can't provision block devices, which would give you more dedicated and isolated dynamic storage.
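+
+A minimal sketch of consuming the driver through a claim (the `scylladb-local-xfs` StorageClass name is an assumption and depends on how the driver was deployed):
+
+:::{code-block} bash
+# Illustrative PersistentVolumeClaim bound to a StorageClass backed by the Local CSI Driver.
+kubectl apply --server-side -f=- <<'EOF'
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: example-local-pvc
+spec:
+  accessModes:
+  - ReadWriteOnce
+  storageClassName: scylladb-local-xfs
+  resources:
+    requests:
+      storage: 10Gi
+EOF
+:::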
+
+### Kubernetes SIG Storage Static Local Volume Provisioner
+
+The local volume static provisioner is a Kubernetes SIG Storage project that can turn your disks into dedicated and isolated PersistentVolumes, but all of them have to be created manually.
diff --git a/docs/source/architecture/tuning.md b/docs/source/architecture/tuning.md
new file mode 100644
index 00000000000..b2a1cb51639
--- /dev/null
+++ b/docs/source/architecture/tuning.md
@@ -0,0 +1,36 @@
+# Tuning
+
+ScyllaDB works best when it's pinned to the CPUs and not interrupted.
+To get the best performance and the lowest latency, {{productName}} tunes both the Kubernetes nodes running ScyllaDB and the ScyllaDB Pods themselves.
+
+One of the most common causes of context switching is network interrupts.
+Packets coming to a Kubernetes node need to be processed, which requires CPU shares.
+
+On Kubernetes, there are always at least a few processes running on a node: the kubelet, Kubernetes provider applications, daemons, and so on.
+These processes require CPU shares, so we cannot dedicate the entire node's processing power to ScyllaDB; we need to leave room for the others.
+We take advantage of this and pin IRQs to the CPUs that are not used exclusively by any ScyllaDB Pods.
+
+Performance tuning is enabled by default **when you create a corresponding [NodeConfig](../resources/nodeconfigs.md) for your nodes**.
+
+Because some of the operations it needs to perform are not multitenant or require elevated privileges, the tuning scripts are run in a dedicated system namespace called `scylla-operator-node-tuning`.
+This namespace is created and entirely managed by {{productName}} and only administrators can access it.
+
+The tuning is based on the `perftune` script that comes from [scyllaDBUtilsImage](../api-reference/groups/scylla.scylladb.com/scyllaoperatorconfigs.rst#api-scylla-scylladb-com-scyllaoperatorconfigs-v1alpha1-status).
+`perftune` executes performance optimizations like tuning the kernel, network, and disk devices, spreading IRQs across CPUs, and more.
+Conceptually, this runs in two parts: tuning the [Kubernetes nodes](#kubernetes-nodes) and tuning the [ScyllaDB Pods](#scylladb-pods).
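+
+For reference, you can check which utils image your deployment resolved (a sketch; `cluster` is assumed to be the name of the default ScyllaOperatorConfig instance):
+
+:::{code-block} bash
+# Print the image that provides the perftune script.
+kubectl get scyllaoperatorconfigs.scylla.scylladb.com/cluster --template='{{ .status.scyllaDBUtilsImage }}'
+:::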
+
+:::{include} ../.internal/tuning-warning.md
+:::
+
+## Kubernetes nodes
+
+The `perftune` script is executed on the target nodes and tunes the kernel, network, disk devices, and more.
+It runs right after the tuning is enabled using a [NodeConfig](../resources/nodeconfigs.md).
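+
+You can observe this in the dedicated namespace (a sketch; the concrete objects are created and managed by {{productName}}):
+
+:::{code-block} bash
+# Inspect the tuning workloads run by the operator.
+kubectl -n=scylla-operator-node-tuning get daemonsets,jobs,pods
+:::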
+
+## ScyllaDB Pods
+
+When a [ScyllaCluster](../resources/scyllaclusters/overview.md) Pod is created (and performance tuning is enabled), the Pod initializes but waits until {{productName}} runs an on-demand Job that configures the host and the ScyllaDB process accordingly (e.g. by spreading IRQs across other CPUs).
+Only after that does it actually start running ScyllaDB.
+
+:::{caution}
+Only Pods with the [`Guaranteed` QoS class](https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#guaranteed) are eligible to be tuned; otherwise, they would not have pinned CPUs.
+
+Always verify that your [ScyllaCluster](../resources/scyllaclusters/overview.md) resource specifications meet [all the criteria](https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#criteria).
+:::
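+
+A quick way to check the QoS class of your ScyllaDB Pods (a sketch; adjust the namespace and label selector to match your ScyllaCluster):
+
+:::{code-block} bash
+# Print the QoS class of the ScyllaDB Pods; it should be "Guaranteed" for tuning to apply.
+kubectl -n=test get pods -l=app.kubernetes.io/name=scylla -o=custom-columns='NAME:.metadata.name,QOS:.status.qosClass'
+:::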
diff --git a/docs/source/clients/index.rst b/docs/source/clients/index.rst
deleted file mode 100644
index 0c8c4697608..00000000000
--- a/docs/source/clients/index.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-==========================================================
-Using ScyllaDB APIs
-==========================================================
-
-.. toctree::
- :titlesonly:
- :maxdepth: 1
-
-
- discovery
- cql
- alternator
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 4826cd4accf..25008fbbd74 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -50,8 +50,18 @@
# -- Options for myst parser
-myst_enable_extensions = ["colon_fence"]
+myst_enable_extensions = ["colon_fence", "attrs_inline", "substitution"]
myst_heading_anchors = 6
+myst_substitutions = {
+ "productName": "Scylla Operator",
+ "repository": "scylladb/scylla-operator",
+ "revision": "master",
+ "imageRepository": "docker.io/scylladb/scylla",
+ "imageTag": "6.2.0",
+ "enterpriseImageRepository": "docker.io/scylladb/scylla-enterprise",
+ "enterpriseImageTag": "2024.1.12",
+ "agentVersion": "3.4.0",
+}
# -- Options for not found extension
@@ -78,7 +88,6 @@
# Defines which version is considered to be the latest stable version.
# Must be listed in smv_tag_whitelist or smv_branch_whitelist.
smv_latest_version = 'v1.14'
-smv_rename_latest_version = 'stable'
# Whitelist pattern for remotes (set to None to use local branches only)
smv_remote_whitelist = r"^origin$"
# Pattern for released versions
diff --git a/docs/source/contributing.md b/docs/source/contributing.md
deleted file mode 100644
index da5fc078732..00000000000
--- a/docs/source/contributing.md
+++ /dev/null
@@ -1,155 +0,0 @@
-# Contributing to Scylla Operator
-
-## Prerequisites
-
-To develop on scylla-operator, your environment must have the following:
-
-1. [Go 1.13](https://golang.org/dl/)
- * Make sure [GOPATH](https://github.com/golang/go/wiki/SettingGOPATH) is set to `GOPATH=$HOME/go`.
-2. [Kustomize v3.1.0](https://github.com/kubernetes-sigs/kustomize/releases/tag/v3.1.0)
-3. [kubebuilder v2.3.1](https://github.com/kubernetes-sigs/kubebuilder/releases/tag/v2.3.1)
-4. [Docker](https://docs.docker.com/install/)
-5. Git client installed
-6. Github account
-
-To install all dependencies (Go, kustomize, kubebuilder, dep), simply run:
-```bash
-./install-dependencies.sh
-```
-
-## Initial Setup
-
-### Create a Fork
-
-From your browser navigate to [http://github.com/scylladb/scylla-operator](http://github.com/scylladb/scylla-operator) and click the "Fork" button.
-
-### Clone Your Fork
-
-Open a console window and do the following:
-
-```bash
-# Create the scylla operator repo path
-mkdir -p $GOPATH/src/github.com/scylladb
-
-# Navigate to the local repo path and clone your fork
-cd $GOPATH/src/github.com/scylladb
-
-# Clone your fork, where is your GitHub account name
-git clone https://github.com//scylla-operator.git
-```
-
-### Add Upstream Remote
-
-First you will need to add the upstream remote to your local git:
-```bash
-# Add 'upstream' to the list of remotes
-git remote add upstream https://github.com/scylladb/scylla-operator.git
-
-# Verify the remote was added
-git remote -v
-```
-Now you should have at least `origin` and `upstream` remotes. You can also add other remotes to collaborate with other contributors.
-
-## Development
-
-To add a feature or to make a bug fix, you will need to create a branch in your fork and then submit a pull request (PR) from the branch.
-
-### Building the project
-
-You can build the project using the Makefile commands:
-* Open the Makefile and change the `IMG` environment variable to a repository you have access to.
-* Run `make docker-push` and wait for the image to be built and uploaded in your repo.
-
-### Create a Branch
-
-From a console, create a new branch based on your fork and start working on it:
-
-```bash
-# Ensure all your remotes are up to date with the latest
-git fetch --all
-
-# Create a new branch that is based off upstream master. Give it a simple, but descriptive name.
-# Generally it will be two to three words separated by dashes and without numbers.
-git checkout -b feature-name upstream/master
-```
-
-Now you are ready to make the changes and commit to your branch.
-
-### Updating Your Fork
-
-During the development lifecycle, you will need to keep up-to-date with the latest upstream master. As others on the team push changes, you will need to `rebase` your commits on top of the latest. This avoids unnecessary merge commits and keeps the commit history clean.
-
-Whenever you need to update your local repository, you never want to merge. You **always** will rebase. Otherwise you will end up with merge commits in the git history. If you have any modified files, you will first have to stash them (`git stash save -u ""`).
-
-```bash
-git fetch --all
-git rebase upstream/master
-```
-
-Rebasing is a very powerful feature of Git. You need to understand how it works or else you will risk losing your work. Read about it in the [Git documentation](https://git-scm.com/docs/git-rebase), it will be well worth it. In a nutshell, rebasing does the following:
-- "Unwinds" your local commits. Your local commits are removed temporarily from the history.
-- The latest changes from upstream are added to the history
-- Your local commits are re-applied one by one
-- If there are merge conflicts, you will be prompted to fix them before continuing. Read the output closely. It will tell you how to complete the rebase.
-- When done rebasing, you will see all of your commits in the history.
-
-## Submitting a Pull Request
-
-Once you have implemented the feature or bug fix in your branch, you will open a PR to the upstream repo. Before opening the PR ensure you have added unit tests, are passing the integration tests, cleaned your commit history, and have rebased on the latest upstream.
-
-In order to open a pull request (PR) it is required to be up to date with the latest changes upstream. If other commits are pushed upstream before your PR is merged, you will also need to rebase again before it will be merged.
-
-### Commit History
-
-To prepare your branch to open a PR, you will need to have the minimal number of logical commits so we can maintain
-a clean commit history. Most commonly a PR will include a single commit where all changes are squashed, although
-sometimes there will be multiple logical commits.
-
-```bash
-# Inspect your commit history to determine if you need to squash commits
-git log
-
-# Rebase the commits and edit, squash, or even reorder them as you determine will keep the history clean.
-# In this example, the last 5 commits will be opened in the git rebase tool.
-git rebase -i HEAD~5
-```
-
-Once your commit history is clean, ensure you have based on the [latest upstream](#updating-your-fork) before you open the PR.
-
-### Commit messages
-
-Please make the first line of your commit message a summary of the change that a user (not a developer) of Operator would like to read,
-and prefix it with the most relevant directory of the change followed by a colon.
-The changelog gets made by looking at just these first lines so make it good!
-
-If you have more to say about the commit, then enter a blank line and carry on the description.
-Remember to say why the change was needed - the commit itself shows what was changed.
-
-Writing more is better than less. Comparing the behaviour before the change to that after the change is very useful.
-Imagine you are writing to yourself in 12 months time when you've forgotten everything about what you just did, and you need to get up to speed quickly.
-
-If the change fixes an issue then write Fixes #1234 in the commit message.
-This can be on the subject line if it will fit. If you don't want to close the associated issue just put #1234 and the change will get linked into the issue.
-
-Here is an example of a short commit message:
-
-```
-sidecar: log on reconcile loop - fixes #1234
-```
-
-And here is an example of a longer one:
-```
-
-api: now supports host networking (#1234)
-
-The operator CRD now has a "network" property that can be used to
-select host networking as well as setting the apropriate DNS policy.
-
-Fixes #1234
-```
-
-### Submitting
-
-Go to the [Scylla Operator github](https://www.github.com/scylladb/scylla-operator) to open the PR. If you have pushed recently, you should see an obvious link to open the PR. If you have not pushed recently, go to the Pull Request tab and select your fork and branch for the PR.
-
-After the PR is open, you can make changes simply by pushing new commits. Your PR will track the changes in your fork and update automatically.
diff --git a/docs/source/eks.md b/docs/source/eks.md
deleted file mode 100644
index 94abd21b15e..00000000000
--- a/docs/source/eks.md
+++ /dev/null
@@ -1,128 +0,0 @@
-# Deploying Scylla on EKS
-
-This guide is focused on deploying Scylla on EKS with improved performance.
-Performance tricks used by the script won't work with different machine tiers.
-It sets up the kubelets on EKS nodes to run with [static cpu policy](https://kubernetes.io/blog/2018/07/24/feature-highlight-cpu-manager/) and uses [local sdd disks](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html) in RAID0 for maximum performance.
-
-Most of the commands used to setup the Scylla cluster are the same for all environments
-As such we have tried to keep them separate in the [general guide](generic.md).
-
-## TL;DR;
-
-If you don't want to run the commands step-by-step, you can just run a script that will set everything up for you:
-```bash
-# Edit according to your preference
-EKS_REGION=us-east-1
-EKS_ZONES=us-east-1a,us-east-1b,us-east-1c
-
-# From inside the examples/eks folder
-cd examples/eks
-./eks.sh -z "$EKS_ZONES" -r "$EKS_REGION"
-```
-
-After you deploy, see how you can [benchmark your cluster with cassandra-stress](generic.md#benchmark-with-cassandra-stress).
-
-## Walkthrough
-
-### EKS Setup
-
-#### Configure environment variables
-
-First of all, we export all the configuration options as environment variables.
-Edit according to your own environment.
-
-```
-EKS_REGION=us-east-1
-EKS_ZONES=us-east-1a,us-east-1b,us-east-1c
-CLUSTER_NAME=scylla-demo
-```
-
-#### Creating an EKS cluster
-
-For this guide, we'll create an EKS cluster with the following:
-
-* A NodeGroup of 3 `i3-2xlarge` Nodes, where the Scylla Pods will be deployed. These nodes will only accept pods having `scylla-clusters` toleration.
-
-```
- - name: scylla-pool
- instanceType: i3.2xlarge
- desiredCapacity: 3
- labels:
- scylla.scylladb.com/node-type: scylla
- taints:
- role: "scylla-clusters:NoSchedule"
- ssh:
- allow: true
- kubeletExtraConfig:
- cpuManagerPolicy: static
-```
-
-* A NodeGroup of 4 `c4.2xlarge` Nodes to deploy `cassandra-stress` later on. These nodes will only accept pods having `cassandra-stress` toleration.
-
-```
- - name: cassandra-stress-pool
- instanceType: c4.2xlarge
- desiredCapacity: 4
- labels:
- pool: "cassandra-stress-pool"
- taints:
- role: "cassandra-stress:NoSchedule"
- ssh:
- allow: true
-```
-
-* A NodeGroup of 1 `i3.large` Node, where the monitoring stack and operator will be deployed.
-```
- - name: monitoring-pool
- instanceType: i3.large
- desiredCapacity: 1
- labels:
- pool: "monitoring-pool"
- ssh:
- allow: true
-```
-
-### Prerequisites
-
-#### Installing script third party dependencies
-
-Script requires several dependencies:
-- eksctl - See: https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html
-- kubectl - See: https://kubernetes.io/docs/tasks/tools/install-kubectl/
-
-### Deploying ScyllaDB Operator
-
-Refer to [Deploying Scylla on a Kubernetes Cluster](generic.md) in the ScyllaDB Operator documentation to deploy the ScyllaDB Operator and its prerequisites.
-
-#### Setting up nodes for ScyllaDB
-
-ScyllaDB, except when in developer mode, requires storage with XFS filesystem. The local NVMes from the cloud provider usually come as individual devices. To use their full capacity together, you'll first need to form a RAID array from those disks.
-`NodeConfig` performs the necessary RAID configuration and XFS filesystem creation, as well as it optimizes the nodes. You can read more about it in [Performance tuning](performance.md) section of ScyllaDB Operator's documentation.
-
-Deploy `NodeConfig` to let it take care of the above operations:
-```
-kubectl apply --server-side -f examples/eks/nodeconfig-alpha.yaml
-```
-
-#### Deploying Local Volume Provisioner
-
-Afterwards, deploy ScyllaDB's [Local Volume Provisioner](https://github.com/scylladb/k8s-local-volume-provisioner), capable of dynamically provisioning PersistentVolumes for your ScyllaDB clusters on mounted XFS filesystems, earlier created over the configured RAID0 arrays.
-```
-kubectl -n local-csi-driver apply --server-side -f examples/common/local-volume-provisioner/local-csi-driver/
-```
-
-### Deploying ScyllaDB
-
-Now you can follow the steps described in [Deploying Scylla on a Kubernetes Cluster](generic.md) to launch your ScyllaDB cluster in a highly performant environment.
-
-#### Accessing the database
-
-Instructions on how to access the database can also be found in the [generic guide](generic.md).
-
-### Deleting an EKS cluster
-
-Once you are done with your experiments delete your cluster using the following command:
-
-```
-eksctl delete cluster "${CLUSTER_NAME}"
-```
diff --git a/docs/source/generic.md b/docs/source/generic.md
index 2f5db439956..2bdc7396937 100644
--- a/docs/source/generic.md
+++ b/docs/source/generic.md
@@ -268,7 +268,7 @@ To change it simply remove the secret. Operator will create a new one. To pick u
## Set up monitoring
-To set up monitoring using Prometheus and Grafana follow [this guide](monitoring.md).
+To set up monitoring using Prometheus and Grafana follow [this guide](./resources/scylladbmonitorings.md).
## Scale a ScyllaCluster
diff --git a/docs/source/gke.md b/docs/source/gke.md
deleted file mode 100644
index 27f99ea439c..00000000000
--- a/docs/source/gke.md
+++ /dev/null
@@ -1,173 +0,0 @@
-# Deploying Scylla on GKE
-
-This guide is focused on deploying Scylla on GKE with maximum performance (without any persistence guarantees).
-It sets up the kubelets on GKE nodes to run with [static cpu policy](https://kubernetes.io/blog/2018/07/24/feature-highlight-cpu-manager/) and uses [local sdd disks](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/local-ssd) in RAID0 for maximum performance.
-
-Most of the commands used to setup the Scylla cluster are the same for all environments
-As such we have tried to keep them separate in the [general guide](generic.md).
-
-## TL;DR;
-
-If you don't want to run the commands step-by-step, you can just run a script that will set everything up for you:
-```bash
-# Edit according to your preference
-GCP_USER=$(gcloud config list account --format "value(core.account)")
-GCP_PROJECT=$(gcloud config list project --format "value(core.project)")
-GCP_ZONE=us-west1-b
-
-# From inside the examples/gke folder
-cd examples/gke
-./gke.sh -u "$GCP_USER" -p "$GCP_PROJECT" -z "$GCP_ZONE"
-
-# Example:
-# ./gke.sh -u yanniszark@arrikto.com -p gke-demo-226716 -z us-west1-b
-```
-
-:::{warning}
-Make sure to pass a ZONE (ex.: us-west1-b) and not a REGION (ex.: us-west1) or it will deploy nodes in each ZONE available in the region.
-:::
-
-After you deploy, see how you can [benchmark your cluster with cassandra-stress](generic.md#benchmark-with-cassandra-stress).
-
-## Walkthrough
-
-### Google Kubernetes Engine Setup
-
-#### Configure environment variables
-
-First of all, we export all the configuration options as environment variables.
-Edit according to your own environment.
-
-```
-GCP_USER=$( gcloud config list account --format "value(core.account)" )
-GCP_PROJECT=$( gcloud config list project --format "value(core.project)" )
-GCP_REGION=us-west1
-GCP_ZONE=us-west1-b
-CLUSTER_NAME=scylla-demo
-CLUSTER_VERSION=$( gcloud container get-server-config --zone ${GCP_ZONE} --format "value(validMasterVersions[0])" )
-```
-
-#### Creating a GKE cluster
-
-First we need to change kubelet CPU Manager policy to static by providing a config file. Create file called `systemconfig.yaml` with the following content:
-```
-kubeletConfig:
- cpuManagerPolicy: static
-```
-
-Then we'll create a GKE cluster with the following:
-
-1. A NodePool of 2 `n1-standard-8` Nodes, where the operator and the monitoring stack will be deployed. These are generic Nodes and their free capacity can be used for other purposes.
- ```
- gcloud container \
- clusters create "${CLUSTER_NAME}" \
- --cluster-version "${CLUSTER_VERSION}" \
- --node-version "${CLUSTER_VERSION}" \
- --machine-type "n1-standard-8" \
- --num-nodes "2" \
- --disk-type "pd-ssd" --disk-size "20" \
- --image-type "UBUNTU_CONTAINERD" \
- --enable-stackdriver-kubernetes \
- --no-enable-autoupgrade \
- --no-enable-autorepair
- ```
-
-2. A NodePool of 2 `n1-standard-32` Nodes to deploy `cassandra-stress` later on.
-
- ```
- gcloud container --project "${GCP_PROJECT}" \
- node-pools create "cassandra-stress-pool" \
- --cluster "${CLUSTER_NAME}" \
- --zone "${GCP_ZONE}" \
- --node-version "${CLUSTER_VERSION}" \
- --machine-type "n1-standard-32" \
- --num-nodes "2" \
- --disk-type "pd-ssd" --disk-size "20" \
- --node-taints role=cassandra-stress:NoSchedule \
- --image-type "UBUNTU_CONTAINERD" \
- --no-enable-autoupgrade \
- --no-enable-autorepair
- ```
-
-3. A NodePool of 4 `n1-standard-32` Nodes, where the Scylla Pods will be deployed. Each of these Nodes has 8 local NVMe SSDs attached, which are provided as [raw block devices](https://cloud.google.com/kubernetes-engine/docs/concepts/local-ssd#block). It is important to disable `autoupgrade` and `autorepair`. Automatic cluster upgrade or node repair has a hard timeout after which it no longer respect PDBs and force deletes the Compute Engine instances, which also deletes all data on the local SSDs. At this point, it's better to handle upgrades manually, with more control over the process and error handling.
- ```
- gcloud container \
- node-pools create "scylla-pool" \
- --cluster "${CLUSTER_NAME}" \
- --node-version "${CLUSTER_VERSION}" \
- --machine-type "n1-standard-32" \
- --num-nodes "4" \
- --disk-type "pd-ssd" --disk-size "20" \
- --local-nvme-ssd-block count="8" \
- --node-taints role=scylla-clusters:NoSchedule \
- --node-labels scylla.scylladb.com/node-type=scylla \
- --image-type "UBUNTU_CONTAINERD" \
- --system-config-from-file=systemconfig.yaml \
- --no-enable-autoupgrade \
- --no-enable-autorepair
- ```
-
-#### Setting Yourself as `cluster-admin`
-> (By default GKE doesn't give you the necessary RBAC permissions)
-
-Get the credentials for your new cluster
-```
-gcloud container clusters get-credentials "${CLUSTER_NAME}" --zone="${GCP_ZONE}"
-```
-
-Create a ClusterRoleBinding for your user.
-In order for this to work you need to have at least permission `container.clusterRoleBindings.create`.
-The easiest way to obtain this permission is to enable the `Kubernetes Engine Admin` role for your user in the GCP IAM web interface.
-```
-kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user "${GCP_USER}"
-```
-
-
-### Prerequisites
-
-### Deploying ScyllaDB Operator
-
-Refer to [Deploying Scylla on a Kubernetes Cluster](generic.md) in the ScyllaDB Operator documentation to deploy the ScyllaDB Operator and its prerequisites.
-
-#### Setting up nodes for ScyllaDB
-
-ScyllaDB, except when in developer mode, requires storage with XFS filesystem. The local NVMes from the cloud provider usually come as individual devices. To use their full capacity together, you'll first need to form a RAID array from those disks.
-`NodeConfig` performs the necessary RAID configuration and XFS filesystem creation, as well as it optimizes the nodes. You can read more about it in [Performance tuning](performance.md) section of ScyllaDB Operator's documentation.
-
-Deploy `NodeConfig` to let it take care of the above operations:
-```
-kubectl apply --server-side -f examples/gke/nodeconfig-alpha.yaml
-```
-
-#### Deploying Local Volume Provisioner
-
-Afterwards, deploy ScyllaDB's [Local Volume Provisioner](https://github.com/scylladb/k8s-local-volume-provisioner), capable of dynamically provisioning PersistentVolumes for your ScyllaDB clusters on mounted XFS filesystems, earlier created over the configured RAID0 arrays.
-```
-kubectl -n local-csi-driver apply --server-side -f examples/common/local-volume-provisioner/local-csi-driver/
-kubectl apply --server-side -f examples/common/local-volume-provisioner/local-csi-driver/00_scylladb-local-xfs.storageclass.yaml
-```
-
-### Deploy Scylla cluster
-In order for the example to work you need to modify the cluster definition in the following way:
-
-```
-sed -i "s//${GCP_REGION}/g;s//${GCP_ZONE}/g" examples/gke/cluster.yaml
-```
-
-This will inject your region and zone into the cluster definition so that it matches the kubernetes cluster you just created.
-
-### Deploying ScyllaDB
-
-Now you can follow the steps described in [Deploying Scylla on a Kubernetes Cluster](generic.md) to launch your ScyllaDB cluster in a highly performant environment.
-
-#### Accessing the database
-
-Instructions on how to access the database can also be found in the [generic guide](generic.md).
-
-### Deleting a GKE cluster
-
-Once you are done with your experiments delete your cluster using the following command:
-
-```
-gcloud container --project "${GCP_PROJECT}" clusters delete --zone "${GCP_ZONE}" "${CLUSTER_NAME}"
-```
diff --git a/docs/source/index.md b/docs/source/index.md
new file mode 100644
index 00000000000..948f731202f
--- /dev/null
+++ b/docs/source/index.md
@@ -0,0 +1,109 @@
+---
+sd_hide_title: true
+---
+
+# Scylla Operator Documentation
+
+:::{toctree}
+:hidden:
+:maxdepth: 1
+
+architecture/index
+installation/index
+resources/index
+quickstarts/index
+support/index
+api-reference/index
+:::
+
+## Scylla Operator
+
+::::{grid} 1 1 1 2
+:reverse:
+
+:::{grid-item}
+:columns: 12 12 12 4
+
+```{image} logo.png
+:width: 150pt
+:class: sd-m-auto
+:name: landing-page-logo
+```
+
+:::
+
+:::{grid-item}
+:child-align: justify
+:columns: 12 12 12 8
+:class: sd-d-flex-column
+
+The {{productName}} project helps users run ScyllaDB on Kubernetes.
+It extends the Kubernetes APIs using [CustomResourceDefinitions(CRDs)](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) and runs controllers that reconcile the desired state declared using these APIs.
+
+{{productName}} works with both ScyllaDB Open Source and ScyllaDB Enterprise. You only have to [change the ScyllaCluster image repository]() and [adjust the ScyllaDB utils image using ScyllaOperatorConfig]().
+
+Below is a subset of items to help you get started.
+You can also navigate through the documentation using the menu.
+
+:::
+
+::::
+
+
+::::{grid} 1 1 2 3
+:gutter: 4
+
+:::{grid-item-card} {material-regular}`architecture;2em` Architecture
+:link: architecture/overview
+
+Learn about the components of Scylla Operator and how they fit together.
++++
+[Learn more »](architecture/overview)
+:::
+
+:::{grid-item-card} {material-regular}`electric_bolt;2em` Installation
+:link: installation/overview
+
+Configure your Kubernetes platform, install prerequisites and all components of {{productName}}.
++++
+[Learn more »](installation/overview)
+:::
+
+:::{grid-item-card} {material-regular}`storage;2em` Working with Resources
+:link: resources/overview
+Learn about the APIs that {{productName}} provides: ScyllaClusters, ScyllaDBMonitorings, and more.
++++
+[Learn more »](resources/overview)
+:::
+
+:::{grid-item-card} {material-regular}`explore;2em` Quickstarts
+:link: quickstarts/index
+
+Get it running right now. Simple GKE and EKS setups.
+
++++
+[Learn more »](quickstarts/index)
+:::
+
+:::{grid-item-card} {material-regular}`fitness_center;2em` Performance Tuning
+:link: architecture/tuning
+Tune your infrastructure and ScyllaDB cluster for the best performance and latency.
++++
+[Learn more »](architecture/tuning)
+:::
+
+:::{grid-item-card} {material-regular}`build;2em` Support
+:link: support/overview
+FAQs, support matrix, must-gather and more.
++++
+[Learn more »](support/overview)
+:::
+
+:::{grid-item-card} {material-regular}`menu_book;2em` API Reference
+:link: api-reference/index
+Visit the automatically generated API reference for all our APIs.
++++
+[Learn more »](api-reference/index)
+:::
+
+::::
diff --git a/docs/source/installation/deploy.odg b/docs/source/installation/deploy.odg
new file mode 100644
index 00000000000..6d78990b951
Binary files /dev/null and b/docs/source/installation/deploy.odg differ
diff --git a/docs/source/installation/deploy.svg b/docs/source/installation/deploy.svg
new file mode 100644
index 00000000000..aca6232423e
--- /dev/null
+++ b/docs/source/installation/deploy.svg
@@ -0,0 +1,577 @@
+
+
+
\ No newline at end of file
diff --git a/docs/source/installation/gitops.md b/docs/source/installation/gitops.md
new file mode 100644
index 00000000000..6ec8f58726b
--- /dev/null
+++ b/docs/source/installation/gitops.md
@@ -0,0 +1,208 @@
+# GitOps (kubectl)
+
+## Disclaimer
+
+The following commands reference manifests that come from the same repository as the source code is being built from.
+This means we can't have a pinned reference to the latest release, as that is a [chicken-and-egg problem](https://en.wikipedia.org/wiki/Chicken_or_the_egg). Therefore, we use a rolling tag for the particular branch in our manifests.
+:::{caution}
+For production deployment, you should always replace the {{productName}} image with a stable reference.
+We encourage you to use a digest (SHA) reference, although using full-version tags is also fine.
+:::
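+
+For example, one way to resolve a full-version tag to its immutable digest (a sketch, assuming `skopeo` and `jq` are installed) is:
+:::{code-block} shell
+# Print the digest that you can pin in the manifests instead of the tag.
+skopeo inspect docker://docker.io/scylladb/scylla-operator:1.14.0 | jq -r '.Digest'
+:::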
+
+
+## Installation
+
+### Prerequisites
+
+Scylla Operator has a few dependencies that you need to install to your cluster first.
+
+In case you already have a supported version of each of these dependencies installed in your cluster, you can skip this part.
+
+#### Cert Manager
+
+:::{code-block} shell
+:substitutions:
+
+# Deploy cert-manager.
+kubectl -n=cert-manager apply --server-side -f=https://raw.githubusercontent.com/{{repository}}/{{revision}}/examples/third-party/cert-manager.yaml
+:::
+
+:::{code-block} shell
+# Wait for CRDs to propagate to all apiservers.
+kubectl wait --for condition=established --timeout=60s crd/certificates.cert-manager.io crd/issuers.cert-manager.io
+
+# Wait for components that others steps depend on.
+for deploy in cert-manager{,-cainjector,-webhook}; do
+ kubectl -n=cert-manager rollout status --timeout=10m deployment.apps/"${deploy}"
+done
+
+# Wait for webhook CA secret to be created.
+for i in {1..30}; do
+ { kubectl -n=cert-manager get secret/cert-manager-webhook-ca && break; } || sleep 1
+done
+:::
+
+#### Prometheus Operator
+
+:::{code-block} shell
+:substitutions:
+
+kubectl -n=prometheus-operator apply --server-side -f=https://raw.githubusercontent.com/{{repository}}/{{revision}}/examples/third-party/prometheus-operator.yaml
+:::
+
+:::{code-block} shell
+# Wait for CRDs to propagate to all apiservers.
+kubectl wait --for='condition=established' crd/prometheuses.monitoring.coreos.com crd/prometheusrules.monitoring.coreos.com crd/servicemonitors.monitoring.coreos.com
+
+# Wait for prometheus operator deployment.
+kubectl -n=prometheus-operator rollout status --timeout=10m deployment.apps/prometheus-operator
+
+# Wait for webhook CA secret to be created.
+for i in {1..30}; do
+ { kubectl -n=cert-manager get secret/cert-manager-webhook-ca && break; } || sleep 1
+done
+:::
+
+### {{productName}}
+
+Once you have the dependencies installed and available in your cluster, it is time to install {{productName}}.
+
+:::{code-block} shell
+:substitutions:
+
+kubectl -n=scylla-operator apply --server-side -f=https://raw.githubusercontent.com/{{repository}}/{{revision}}/deploy/operator/operator.yaml
+:::
+
+::::{caution}
+{{productName}} deployment references its own image that it later runs alongside each ScyllaDB instance. Therefore, you also have to replace the image in the environment variable called `SCYLLA_OPERATOR_IMAGE`:
+:::{code-block} yaml
+:linenos:
+:emphasize-lines: 16,19,20
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: scylla-operator
+ namespace: scylla-operator
+# ...
+spec:
+ # ...
+ template:
+ # ...
+ spec:
+ # ...
+ containers:
+ - name: scylla-operator
+ # ...
+ image: docker.io/scylladb/scylla-operator:1.14.0@sha256:8c75c5780e2283f0a8f9734925352716f37e0e7f41007e50ce9b1d9924046fa1
+ env:
+ # ...
+ - name: SCYLLA_OPERATOR_IMAGE
+ value: docker.io/scylladb/scylla-operator:1.14.0@sha256:8c75c5780e2283f0a8f9734925352716f37e0e7f41007e50ce9b1d9924046fa1
+:::
+The {{productName}} image value and the `SCYLLA_OPERATOR_IMAGE` value must always match.
+Be careful not to use a rolling tag for either of them to avoid an accidental skew!
+::::
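+
+As a sanity check, you can compare the two values, for example like this (a sketch; the deployment and container names follow the manifest above):
+:::{code-block} shell
+# Print the operator image and the SCYLLA_OPERATOR_IMAGE value; they must be identical.
+kubectl -n=scylla-operator get deployment.apps/scylla-operator \
+  -o=jsonpath='{.spec.template.spec.containers[?(@.name=="scylla-operator")].image}{"\n"}{.spec.template.spec.containers[?(@.name=="scylla-operator")].env[?(@.name=="SCYLLA_OPERATOR_IMAGE")].value}{"\n"}'
+:::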
+
+:::{code-block} shell
+# Wait for CRDs to propagate to all apiservers.
+kubectl wait --for='condition=established' crd/scyllaclusters.scylla.scylladb.com crd/nodeconfigs.scylla.scylladb.com crd/scyllaoperatorconfigs.scylla.scylladb.com crd/scylladbmonitorings.scylla.scylladb.com
+
+# Wait for the components to deploy.
+kubectl -n=scylla-operator rollout status --timeout=10m deployment.apps/{scylla-operator,webhook-server}
+
+# Wait for webhook CA secret to be created.
+for i in {1..30}; do
+ { kubectl -n=cert-manager get secret/cert-manager-webhook-ca && break; } || sleep 1
+done
+:::
+
+### Setting up local storage on nodes and enabling tuning
+
+:::{caution}
+The following step heavily depends on the platform that you use, the machine type, or the options chosen when creating a node pool.
+
+Please review the [NodeConfig](../resources/nodeconfigs.md) and adjust it for your platform!
+:::
+
+:::::{tab-set}
+
+::::{tab-item} GKE (NVMe)
+:::{code-block} shell
+:substitutions:
+kubectl -n=scylla-operator apply --server-side -f=https://raw.githubusercontent.com/{{repository}}/{{revision}}/examples/gke/nodeconfig-alpha.yaml
+:::
+::::
+
+::::{tab-item} EKS (NVMe)
+:::{code-block} shell
+:substitutions:
+kubectl -n=scylla-operator apply --server-side -f=https://raw.githubusercontent.com/{{repository}}/{{revision}}/examples/eks/nodeconfig-alpha.yaml
+:::
+::::
+
+::::{tab-item} Any platform (Loop devices)
+:::{caution}
+This NodeConfig sets up loop devices instead of NVMe disks and is only intended for development purposes when you don't have the NVMe disks available.
+Do not expect meaningful performance with this setup.
+:::
+:::{code-block} shell
+:substitutions:
+kubectl -n=scylla-operator apply --server-side -f=https://raw.githubusercontent.com/{{repository}}/{{revision}}/examples/generic/nodeconfig-alpha.yaml
+:::
+::::
+
+:::::
+
+:::{note}
+By default, performance tuning is enabled for all nodes selected by the [NodeConfig](../resources/nodeconfigs.md), unless they are opted out.
+:::
+
+:::{code-block} shell
+# Wait for the NodeConfig to apply changes to the Kubernetes nodes.
+kubectl wait --for='condition=Reconciled' --timeout=10m nodeconfigs.scylla.scylladb.com/cluster
+:::
+
+### Local CSI driver
+
+:::{code-block} shell
+:substitutions:
+
+kubectl -n=local-csi-driver apply --server-side -f=https://raw.githubusercontent.com/{{repository}}/{{revision}}/examples/common/local-volume-provisioner/local-csi-driver/{00_namespace,00_scylladb-local-xfs.storageclass,10_csidriver,10_driver.serviceaccount,10_provisioner_clusterrole,20_provisioner_clusterrolebinding,50_daemonset}.yaml
+:::
+
+:::{code-block} shell
+# Wait for it to deploy.
+kubectl -n=local-csi-driver rollout status --timeout=10m daemonset.apps/local-csi-driver
+:::
+
+### ScyllaDB Manager
+
+:::{include} ../.internal/manager-license-note.md
+:::
+
+:::::{tab-set}
+
+::::{tab-item} Production (sized)
+:::{code-block} shell
+:substitutions:
+kubectl -n=scylla-manager apply --server-side -f=https://raw.githubusercontent.com/{{repository}}/{{revision}}/deploy/manager-prod.yaml
+:::
+::::
+
+::::{tab-item} Development (sized)
+:::{code-block} shell
+:substitutions:
+kubectl -n=scylla-manager apply --server-side -f=https://raw.githubusercontent.com/{{repository}}/{{revision}}/deploy/manager-dev.yaml
+:::
+::::
+
+:::::
+
+:::{code-block} shell
+# Wait for it to deploy.
+kubectl -n=scylla-manager rollout status --timeout=10m deployment.apps/scylla-manager
+:::
+
+## Next steps
+
+Now that you've successfully installed {{productName}}, it's time to look at [how to run ScyllaDB](../resources/scyllaclusters/overview.md).
diff --git a/docs/source/helm.md b/docs/source/installation/helm.md
similarity index 90%
rename from docs/source/helm.md
rename to docs/source/installation/helm.md
index e157523c417..34125ce53f2 100644
--- a/docs/source/helm.md
+++ b/docs/source/installation/helm.md
@@ -1,4 +1,7 @@
-# Deploying Scylla stack using Helm Charts
+# Helm
+
+:::{include} ../.internal/helm-crd-warning.md
+:::
In this example we will install Scylla stack on Kubernetes. This includes the following components:
- Scylla Operator
@@ -163,7 +166,7 @@ racks:
Above cluster will use 4.3.0 Scylla, 2.2.1 Scylla Manager Agent sidecar and will have a single rack having 2 nodes.
Each node will have a single CPU and 1 GiB of memory.
-For other customizable fields, please refer to [ScyllaCluster CRD](api-reference/groups/scylla.scylladb.com/scyllaclusters.rst).
+For other customizable fields, please refer to [ScyllaCluster CRD](../api-reference/groups/scylla.scylladb.com/scyllaclusters.rst).
CRD Rack Spec and Helm Chart Rack should have the same fields.
### Installation
@@ -180,7 +183,7 @@ Scylla Operator will provision this cluster on your K8s environment.
Scylla Manager Chart allows to customize and deploy Scylla Manager in K8s environment.
Scylla Manager consist of two applications (Scylla Manager itself and Scylla Manager Controller) and additional Scylla cluster.
-To read more about Scylla Manager see [Manager guide](manager.md).
+To read more about Scylla Manager see [Manager guide](../manager.md).
### Scylla Manager
@@ -313,7 +316,7 @@ Two running nodes, exactly what we were asking for.
## Monitoring
-To spin up a Prometheus monitoring refer to [monitoring guide](monitoring.md).
+To spin up a Prometheus monitoring refer to [monitoring guide](../resources/scylladbmonitorings.md).
Helm charts can create ServiceMonitors needed to observe Scylla Manager and Scylla.
Both of these Helm Charts allows to specify whether you want to create a ServiceMonitor:
@@ -329,6 +332,26 @@ helm upgrade --install scylla --namespace scylla scylla/scylla -f examples/helm/
Helm should notice the difference, install the ServiceMonitor, and then Prometheous will be able to scrape metrics.
+## Upgrade via Helm
+
+Replace `` with the name of your Helm release for Scylla Operator and replace `` with the version number you want to install:
+1. Make sure Helm chart repository is up-to-date:
+ ```
+ helm repo add scylla-operator https://storage.googleapis.com/scylla-operator-charts/stable
+ helm repo update
+ ```
+2. Update CRD resources. We recommend using the `--server-side` flag for `kubectl apply`, if your version supports it.
+ ```
+ tmpdir=$( mktemp -d ) \
+ && helm pull scylla-operator/scylla-operator --version --untar --untardir "${tmpdir}" \
+ && find "${tmpdir}"/scylla-operator/crds/ -name '*.yaml' -printf '-f=%p ' \
+ | xargs kubectl apply
+ ```
+3. Update Scylla Operator
+ ```
+ helm upgrade --version scylla-operator/scylla-operator
+ ```
+
## Cleanup
To remove these applications you can simply uninstall them using Helm CLI:
diff --git a/docs/source/installation/index.md b/docs/source/installation/index.md
new file mode 100644
index 00000000000..50a02ed71e1
--- /dev/null
+++ b/docs/source/installation/index.md
@@ -0,0 +1,10 @@
+# Installation
+
+:::{toctree}
+:maxdepth: 1
+
+overview
+kubernetes/index
+gitops
+helm
+:::
diff --git a/docs/source/installation/kubernetes/eks.md b/docs/source/installation/kubernetes/eks.md
new file mode 100644
index 00000000000..0f94317f4c9
--- /dev/null
+++ b/docs/source/installation/kubernetes/eks.md
@@ -0,0 +1,17 @@
+# EKS
+
+## Kubelet
+
+### Static CPU policy
+
+`eksctl` allows you to set the [static CPU policy](./generic.md#static-cpu-policy) for each node pool, for example:
+```{code} yaml
+:number-lines:
+apiVersion: eksctl.io/v1alpha5
+kind: ClusterConfig
+# ...
+nodeGroups:
+- name: scylla-pool
+ kubeletExtraConfig:
+ cpuManagerPolicy: static
+```
diff --git a/docs/source/installation/kubernetes/generic.md b/docs/source/installation/kubernetes/generic.md
new file mode 100644
index 00000000000..11585ad38b6
--- /dev/null
+++ b/docs/source/installation/kubernetes/generic.md
@@ -0,0 +1,25 @@
+# Generic
+
+Because {{productName}} aims to leverage the best performance available, there are a few extra settings that need to be configured on your Kubernetes cluster.
+
+## Kubelet
+
+### Static CPU policy
+
+By default, *kubelet* uses the CFS quota to enforce pod CPU limits.
+When a Kubernetes node runs a lot of CPU-bound Pods, processes can move between different CPU cores, depending on whether the Pod
+is throttled and which CPU cores are available.
+However, kubelet may be configured to assign CPUs exclusively by setting the CPU manager policy to static.
+
+To get the best performance and latency, ScyllaDB Pods should run under the [static CPU policy](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/#static-policy) to pin cores.
+
+:::{note}
+Configuring kubelet options is provider specific.
+We provide a few examples for the major platforms later in this section; otherwise, please consult the documentation for your Kubernetes platform.
+:::
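+
+For reference, on a self-managed kubelet the relevant [KubeletConfiguration](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/) fragment could look like this (a sketch; all other settings are omitted):
+:::{code} yaml
+:number-lines:
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# ...
+cpuManagerPolicy: static
+:::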
+
+## Nodes
+
+### Labels
+
+For the purposes of the installation guides, we assume that the nodes meant to run ScyllaDB (ScyllaClusters) have the label `scylla.scylladb.com/node-type=scylla`.
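+
+If your nodes don't carry this label yet, you can, for example, add it manually (a sketch; `<node-name>` is a placeholder for one of your nodes):
+:::{code} shell
+# Label a node that is meant to run ScyllaDB Pods.
+kubectl label node <node-name> scylla.scylladb.com/node-type=scylla
+:::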
diff --git a/docs/source/installation/kubernetes/gke.md b/docs/source/installation/kubernetes/gke.md
new file mode 100644
index 00000000000..b1ba88d1fa4
--- /dev/null
+++ b/docs/source/installation/kubernetes/gke.md
@@ -0,0 +1,12 @@
+# GKE
+
+## Kubelet
+
+### Static CPU policy
+
+GKE allows you to set the [static CPU policy](./generic.md#static-cpu-policy) using a [node system configuration](https://cloud.google.com/kubernetes-engine/docs/how-to/node-system-config):
+:::{code} yaml
+:number-lines:
+kubeletConfig:
+ cpuManagerPolicy: static
+:::
diff --git a/docs/source/installation/kubernetes/index.md b/docs/source/installation/kubernetes/index.md
new file mode 100644
index 00000000000..fc560ee48e6
--- /dev/null
+++ b/docs/source/installation/kubernetes/index.md
@@ -0,0 +1,9 @@
+# Kubernetes
+
+:::{toctree}
+:maxdepth: 1
+
+generic
+eks
+gke
+:::
diff --git a/docs/source/installation/overview.md b/docs/source/installation/overview.md
new file mode 100644
index 00000000000..0dfc798155c
--- /dev/null
+++ b/docs/source/installation/overview.md
@@ -0,0 +1,106 @@
+# Overview
+
+## Kubernetes
+
+{{productName}} is a set of controllers and Kubernetes API extensions.
+Therefore, we assume you either have an existing (conformant) Kubernetes cluster or are already familiar with how a Kubernetes cluster is deployed and operated.
+
+{{productName}} controllers and API extensions may have dependencies on some of the newer Kubernetes features and APIs that need to be available.
+Moreover, {{productName}} implements additional features like performance tuning, some of which are platform/OS specific.
+While we do our best to implement these routines as generically as possible, sometimes there isn't any low-level API to base them on, and they may work only on a subset of platforms.
+
+:::{caution}
+We *strongly* recommend using a [supported Kubernetes platform](../support/releases.md#supported-kubernetes-platforms).
+Issues on unsupported platforms are unlikely to be addressed.
+:::
+
+:::{note}
+Before reporting an issue, please see our [support page](../support/overview.md) and [troubleshooting installation issues](../support/troubleshooting/installation).
+:::
+
+## {{productName}} components
+
+Scylla Operator consists of multiple components that need to be installed in your cluster.
+This is by no means a complete list of all resources; rather, it aims to show the major components in one place.
+
+
+```{figure} deploy.svg
+:class: sd-m-auto
+:name: deploy-overview
+```
+
+:::{note}
+Depending on [which storage provisioner you choose](../architecture/storage/overview.md), the `local-csi-driver` may be replaced or complemented by a different component.
+:::
+
+### {{productName}}
+
+{{productName}} contains the Kubernetes API extensions and corresponding controllers and admission hooks that run inside `scylla-operator` namespace.
+
+You can learn more about the APIs in [resources section](../resources/overview.md) and the [generated API reference](../api-reference/index.rst).
+
+### ScyllaDB Manager
+
+ScyllaDB Manager is a global deployment that is responsible for operating all [ScyllaClusters](../api-reference/groups/scylla.scylladb.com/scyllaclusters.rst) and runs inside `scylla-manager` namespace.
+There is a corresponding controller running in [{{productName}}](./#{{productName}}) that syncs the [ScyllaCluster](../api-reference/groups/scylla.scylladb.com/scyllaclusters.rst) metadata, [backup](../api-reference/groups/scylla.scylladb.com/scyllaclusters.rst#spec-backups) and [repair](../api-reference/groups/scylla.scylladb.com/scyllaclusters.rst#spec-repairs) tasks into the manager (and vice versa), so users don't have to access the shared instance directly. Unfortunately, at this point, other tasks like restoring from a backup require executing into the shared ScyllaDB Manager deployment, which effectively requires administrator privileges.
+
+ScyllaDB Manager uses a small ScyllaCluster instance internally and thus depends on the {{productName}} deployment.
+
+### NodeConfig
+
+[NodeConfig](../resources/nodeconfigs.md) is a cluster-scoped custom resource provided by {{productName}} that helps you set up local disks on Kubernetes nodes, create and mount a filesystem, configure performance tuning, and more.
+
+### ScyllaOperatorConfig
+
+[ScyllaOperatorConfig](../resources/scyllaoperatorconfigs.md) is a cluster-scoped custom resource provided by {{productName}} that helps you configure {{productName}}. It lets you configure auxiliary images, see which ones are in use, and more.
+
+### Local CSI driver
+
+ScyllaDB provides you with a custom [Local CSI driver](../architecture/storage/local-csi-driver.md) that lets you dynamically provision PersistentVolumes and share the disk space, while still tracking the capacity and enforcing quotas.
+
+## Installation modes
+
+Depending on your preference, there is more than one way to install {{productName}}, and more may become available in the future or be provided by other parties or supply chains.
+
+At this point, we provide two ways to install the operator: [GitOps/manifests](#gitops) and [Helm charts](#helm). Given that we provide Helm charts only for a subset of the main resources, and because **Helm can't update CRDs**, you still have to resort to using the manifests or GitOps anyway. For a consistent experience, we recommend using the [GitOps flow](#gitops), which will also give you a better idea of what you actually deploy.
+
+:::{caution}
+Do not use rolling tags (like `latest` or `1.14`) with our manifests in production. The manifests and images for a particular release are tightly coupled, and any update requires updating both of them, while rolling tags may unexpectedly update only the images.
+:::
+
+:::{note}
+To avoid races, when you create a CRD, you need to wait for it to be propagated to other instances of the kubernetes-apiserver before you can reliably create the corresponding CRs.
+:::
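+
+For example, with `kubectl` you can wait for a CRD to be established before creating any corresponding objects (shown here for the ScyllaCluster CRD):
+:::{code-block} shell
+kubectl wait --for='condition=established' crd/scyllaclusters.scylla.scylladb.com
+:::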
+
+:::{note}
+When you create a [ValidatingWebhookConfiguration](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#webhook-configuration) or [MutatingWebhookConfiguration](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#webhook-configuration), you have to wait for the corresponding webhook deployments to be available, or the kubernetes-apiserver will fail all requests for resources affected by these webhook configurations.
+Also note that some platforms have non-conformant networking setups by default that prevent the kube-apiserver from talking to the webhooks - [see our troubleshooting guide for more info](../support/troubleshooting/installation.md#webhooks).
+:::
+
+### GitOps
+
+We provide a set of Kubernetes manifests that contain all the objects you need to apply to your Kubernetes cluster.
+Depending on your preference, applying them may range from using Git+ArgoCD to Git+kubectl.
+To keep the instructions clear for everyone, we demonstrate applying the manifests using `kubectl`, which everyone is familiar with and can translate to the GitOps platform of their choosing.
+
+For details, please see the [dedicated section describing the deployment using GitOps (kubectl)](./gitops.md).
+
+### Helm
+
+:::{include} ../.internal/helm-crd-warning.md
+:::
+
+For details, please see the [dedicated section describing the deployment using Helm](./helm.md).
+
+## Upgrades
+
+{{productName}} supports N+1 upgrades only.
+That means you can only update by one minor version at a time, wait for it to successfully roll out, and then update all ScyllaClusters that also run the image that's being updated. ({{productName}} injects it as a sidecar to help run and manage ScyllaDB.)
+
+We value the stability of our APIs and all API changes are backwards compatible.
diff --git a/docs/source/upgrade.md b/docs/source/installation/upgrade.md
similarity index 82%
rename from docs/source/upgrade.md
rename to docs/source/installation/upgrade.md
index bc458be7c3d..ad1dc94dead 100644
--- a/docs/source/upgrade.md
+++ b/docs/source/installation/upgrade.md
@@ -1,31 +1,10 @@
-# Upgrade of Scylla Operator
+# Upgrades
This page describes Scylla Operator upgrade procedures.
There are two generic update procedures - via Helm and via kubectl. Before upgrading, please check this page to find out
if your target version requires additional upgrade steps.
-## Upgrade via Helm
-Helm doesn't support managing CustomResourceDefinition resources ([#5871](https://github.com/helm/helm/issues/5871), [#7735](https://github.com/helm/helm/issues/7735))
-These are only created on first install and never updated. In order to update them, users have to do it manually.
-
-Replace `` with the name of your Helm release for Scylla Operator and replace `` with the version number you want to install:
-1. Make sure Helm chart repository is up-to-date:
- ```
- helm repo add scylla-operator https://storage.googleapis.com/scylla-operator-charts/stable
- helm repo update
- ```
-2. Update CRD resources. We recommend using `--server-side` flag for `kubectl apply`, if your version supports it.
- ```
- tmpdir=$( mktemp -d ) \
- && helm pull scylla-operator/scylla-operator --version --untar --untardir "${tmpdir}" \
- && find "${tmpdir}"/scylla-operator/crds/ -name '*.yaml' -printf '-f=%p ' \
- | xargs kubectl apply
- ```
-3. Update Scylla Operator
- ```
- helm upgrade --version scylla-operator/scylla-operator
- ```
## Upgrade via kubectl
diff --git a/docs/source/manager.md b/docs/source/manager.md
deleted file mode 100644
index 9a8db2fd37a..00000000000
--- a/docs/source/manager.md
+++ /dev/null
@@ -1,258 +0,0 @@
-# Deploying Scylla Manager on a Kubernetes Cluster
-
-Scylla Manager is a product for database operations automation,
-it can schedule tasks such as repairs and backups.
-Scylla Manager can manage multiple Scylla clusters and run cluster-wide tasks
-in a controlled and predictable way.
-
-Scylla Manager is available for Scylla Enterprise customers and Scylla Open Source users.
-With Scylla Open Source, Scylla Manager is limited to 5 nodes.
-See the Scylla Manager [Proprietary Software License Agreement](https://www.scylladb.com/scylla-manager-software-license-agreement/) for details.
-
-## Prerequisites
-
-* Kubernetes cluster
-* Scylla Operator - see [generic guide](generic.md)
-
-## Architecture
-
-Scylla Manager in K8s consist of:
-- Dedicated Scylla Cluster
-
- Scylla Manager persists its state to a Scylla cluster.
-Additional small single node cluster is spawned in the Manager namespace.
-
-- Scylla Manager Controller
-
- Main mission of Controller is to watch changes of Scylla Clusters, and synchronize three states.
- 1. What user wants - task definition in CRD.
- 2. What Controller registered - Task name to Task ID mapping - CRD status.
- 3. Scylla Manager task listing - internal state of Scylla Manager.
-
- When Scylla Cluster CRD is being deployed Controller will register it in Scylla Manager once cluster reaches desired node count.
-Once Cluster is fully up and running it will schedule all tasks defined in Cluster CRD.
-Controller also supports task updates and unscheduling.
-
-- Scylla Manager
-
- Regular Scylla Manager, the same used in cloud and bare metal deployments.
-
-
-
-## Deploy Scylla Manager
-
-Deploy the Scylla Manager using the following commands:
-
-```console
-kubectl apply -f deploy/manager-prod.yaml
-```
-
-This will install the Scylla Manager in the `scylla-manager` namespace.
-You can check if the Scylla Manager is up and running with:
-
-```console
-kubectl -n scylla-manager get pods
-NAME READY STATUS RESTARTS AGE
-scylla-manager-cluster-manager-dc-manager-rack-0 2/2 Running 0 37m
-scylla-manager-controller-0 1/1 Running 0 28m
-scylla-manager-scylla-manager-7bd9f968b9-w25jw 1/1 Running 0 37m
-```
-
-As you can see there are three pods:
-* `scylla-manager-cluster-manager-dc-manager-rack-0` - is a single node Scylla cluster.
-* `scylla-manager-controller-0` - Scylla Manager Controller.
-* `scylla-manager-scylla-manager-7bd9f968b9-w25jw` - Scylla Manager.
-
-To see if Scylla Manager is fully up and running we can check their logs.
-To do this, execute following command:
-
- ```console
-kubectl -n scylla-manager logs scylla-manager-controller-0
-```
-
-The output should be something like:
-```console
-{"L":"INFO","T":"2020-09-23T11:25:27.882Z","M":"Scylla Manager Controller started","version":"","build_date":"","commit":"","built_by":"","go_version":"","options":{"Name":"scylla-manager-controller-0","Namespace":"scylla-manager","LogLevel":"debug","ApiAddress":"http://127.0.0.1:5080/api/v1"},"_trace_id":"LQEJV3kDR5Gx9M3XQ2YnnQ"}
-{"L":"INFO","T":"2020-09-23T11:25:28.435Z","M":"Registering Components.","_trace_id":"LQEJV3kDR5Gx9M3XQ2YnnQ"}
-```
-
-To check logs of Scylla Manager itself, use following command:
-```console
-kubectl -n scylla-manager logs scylla-manager-scylla-manager-7bd9f968b9-w25jw
-```
-
-The output should be something like:
-
-```console
-{"L":"INFO","T":"2020-09-23T11:26:53.238Z","M":"Scylla Manager Server","version":"2.1.2-0.20200816.76cc4dcc","pid":1,"_trace_id":"xQhkJ0OuR8e6iMDEpM62Hg"}
-{"L":"INFO","T":"2020-09-23T11:26:54.519Z","M":"Using config","config":{"HTTP":"127.0.0.1:5080","HTTPS":"","TLSCertFile":"/var/lib/scylla-manager/scylla_manager.crt","TLSKeyFile":"/var/lib/scylla-manager/scylla_manager.key","TLSCAFile":"","Prometheus":":56090","PrometheusScrapeInterval":5000000000,"debug":"127.0.0.1:56112","Logger":{"Mode":"stderr","Level":"info","Development":false},"Database":{"Hosts":["scylla-manager-cluster-manager-dc-manager-rack-0.scylla-manager.svc"],"SSL":false,"User":"","Password":"","LocalDC":"","Keyspace":"scylla_manager","MigrateDir":"/etc/scylla-manager/cql","MigrateTimeout":30000000000,"MigrateMaxWaitSchemaAgreement":300000000000,"ReplicationFactor":1,"Timeout":600000000,"TokenAware":true},"SSL":{"CertFile":"","Validate":true,"UserCertFile":"","UserKeyFile":""},"Healthcheck":{"Timeout":250000000,"SSLTimeout":750000000},"Backup":{"DiskSpaceFreeMinPercent":10,"AgeMax":43200000000000},"Repair":{"SegmentsPerRepair":1,"ShardParallelMax":0,"ShardFailedSegmentsMax":100,"PollInterval":200000000,"ErrorBackoff":300000000000,"AgeMax":0,"ShardingIgnoreMsbBits":12}},"config_files":["/mnt/etc/scylla-manager/scylla-manager.yaml"],"_trace_id":"xQhkJ0OuR8e6iMDEpM62Hg"}
-{"L":"INFO","T":"2020-09-23T11:26:54.519Z","M":"Checking database connectivity...","_trace_id":"xQhkJ0OuR8e6iMDEpM62Hg"}
-```
-
-If there are no errors in the logs, let's spin a Scylla Cluster.
-
-## Cluster registration
-
-
-When the Scylla Manager is fully up and running, lets create a regular instance of Scylla cluster.
-
-See [generic tutorial](generic.md) to spawn your cluster.
-
-Note: If you already have some Scylla Clusters, after installing Manager they should be
-automatically registered in Scylla Manager.
-
-Once cluster reaches desired node count, cluster status will be updated with ID under which it was registered in Manager.
-
- ```console
-kubectl -n scylla describe Cluster
-
-[...]
-Status:
- Manager Id: d1d532cd-49f2-4c97-9263-25126532803b
- Racks:
- us-east-1a:
- Members: 3
- Ready Members: 3
- Version: 4.0.0
-```
-You can use this ID to talk to Scylla Manager using `sctool` CLI installed in Scylla Manager Pod.
-You can also use Cluster name in `namespace/cluster-name` format.
-
-```console
-kubectl -n scylla-manager exec -ti scylla-manager-scylla-manager-7bd9f968b9-w25jw -- sctool task list
-
-Cluster: scylla/simple-cluster (d1d532cd-49f2-4c97-9263-25126532803b)
-╭─────────────────────────────────────────────────────────────┬──────────────────────────────────────┬────────────────────────────────┬────────╮
-│ Task │ Arguments │ Next run │ Status │
-├─────────────────────────────────────────────────────────────┼──────────────────────────────────────┼────────────────────────────────┼────────┤
-│ healthcheck/400b2723-eec5-422a-b7f3-236a0e10575b │ │ 23 Sep 20 14:28:42 CEST (+15s) │ DONE │
-│ healthcheck_rest/28169610-a969-4c20-9d11-ab7568b8a1bd │ │ 23 Sep 20 14:29:57 CEST (+1m) │ NEW │
-╰─────────────────────────────────────────────────────────────┴──────────────────────────────────────┴────────────────────────────────┴────────╯
-
-```
-
-Scylla Manager by default registers recurring healhcheck tasks for Agent and for each of the enabled frontends (CQL, Alternator).
-
-In this task listing we can see CQL and REST healthchecks.
-
-## Task scheduling
-
-You can either define tasks prior Cluster creation, or for existing Cluster.
-Let's edit already running cluster definition to add repair and backup task.
-```console
-kubectl -n scylla edit Cluster simple-cluster
-```
-
-Add following task definition to Cluster spec:
-```
- repairs:
- - name: "users repair"
- keyspace: ["users"]
- interval: "1d"
- backups:
- - name: "weekly backup"
- location: ["s3:cluster-backups"]
- retention: 3
- interval: "7d"
- - name: "daily backup"
- location: ["s3:cluster-backups"]
- retention: 7
- interval: "1d"
-```
-
-For full task definition configuration consult [ScyllaCluster CRD](api-reference/groups/scylla.scylladb.com/scyllaclusters.rst).
-
-**Note**: Scylla Manager Agent must have access to above bucket prior the update in order to schedule backup task.
-Consult Scylla Manager documentation for details on how to set it up.
-
-Scylla Manager Controller will spot this change and will schedule tasks in Scylla Manager.
-
-```console
-kubectl -n scylla-manager exec -ti scylla-manager-scylla-manager-7bd9f968b9-w25jw -- sctool task list
-
-Cluster: scylla/simple-cluster (d1d532cd-49f2-4c97-9263-25126532803b)
-╭─────────────────────────────────────────────────────────────┬──────────────────────────────────────┬────────────────────────────────┬────────╮
-│ Task │ Arguments │ Next run │ Status │
-├─────────────────────────────────────────────────────────────┼──────────────────────────────────────┼────────────────────────────────┼────────┤
-│ healthcheck/400b2723-eec5-422a-b7f3-236a0e10575b │ │ 23 Sep 20 14:28:42 CEST (+15s) │ DONE │
-│ backup/275aae7f-c436-4fc8-bcec-479e65fb8372 │ -L s3:cluster-backups --retention 3 │ 23 Sep 20 14:28:58 CEST (+7d) │ NEW │
-│ healthcheck_rest/28169610-a969-4c20-9d11-ab7568b8a1bd │ │ 23 Sep 20 14:29:57 CEST (+1m) │ NEW │
-│ repair/d4946360-c29d-4bb4-8b9d-619ada495c2a │ │ 23 Sep 20 14:38:42 CEST │ NEW │
-╰─────────────────────────────────────────────────────────────┴──────────────────────────────────────┴────────────────────────────────┴────────╯
-
-```
-
-As you can see, we have two new tasks, weekly recurring backup, and one repair which should start shortly.
-
-To check progress of run you can use following command:
-
-```console
-kubectl -n scylla-manager exec -ti scylla-manager-scylla-manager-7bd9f968b9-w25jw -- sctool task progress --cluster d1d532cd-49f2-4c97-9263-25126532803b repair/d4946360-c29d-4bb4-8b9d-619ada495c2a
-Status: RUNNING
-Start time: 23 Sep 20 14:38:42 UTC
-Duration: 13s
-Progress: 2.69%
-Datacenters:
- - us-east-1
-+--------------------+-------+
-| system_auth | 8.06% |
-| system_distributed | 0.00% |
-| system_traces | 0.00% |
-+--------------------+-------+
-
-```
-Other tasks can be also tracked using the same command, but using different task ID.
-Task IDs are present in Cluster Status as well as in task listing.
-
-## Clean Up
-
-To clean up all resources associated with Scylla Manager, you can run the commands below.
-
-**NOTE:** this will destroy your Scylla Manager database and delete all of its associated data.
-
-```console
-kubectl delete -f deploy/manager-prod.yaml
-```
-
-## Troubleshooting
-
-**Manager is not running**
-
-If the Scylla Manager does not come up, the first step would be to examine the Manager and Controller logs:
-
-```console
-kubectl -n scylla-manager logs -f scylla-manager-controller-0 scylla-manager-controller
-kubectl -n scylla-manager logs -f scylla-manager-controller-0 scylla-manager-scylla-manager-7bd9f968b9-w25jw
-```
-
-
-**My task wasn't scheduled**
-
-If your task wasn't scheduled, Cluster status will be updated with error messages for each failed task.
-You can also consult Scylla Manager logs.
-
-Example:
-
-Following status describes error when backup task cannot be scheduled, due to lack of access to bucket:
-```console
-Status:
- Backups:
- Error: create backup target: location is not accessible: 10.100.16.62: giving up after 2 attempts: after 15s: timeout - make sure the location is correct and credentials are set, to debug SSH to 10.100.16.62 and run "scylla-manager-agent check-location -L s3:manager-test --debug"; 10.107.193.33: giving up after 2 attempts: after 15s: timeout - make sure the location is correct and credentials are set, to debug SSH to 10.107.193.33 and run "scylla-manager-agent check-location -L s3:manager-test --debug"; 10.109.197.60: giving up after 2 attempts: after 15s: timeout - make sure the location is correct and credentials are set, to debug SSH to 10.109.197.60 and run "scylla-manager-agent check-location -L s3:manager-test --debug"
- Id: 00000000-0000-0000-0000-000000000000
- Interval: 0
- Location:
- s3:manager-test
- Name: adhoc backup
- Num Retries: 3
- Retention: 3
- Start Date: now
- Manager Id: 2b9dbe8c-9daa-4703-a66d-c29f63a917c8
- Racks:
- us-east-1a:
- Members: 3
- Ready Members: 3
- Version: 4.0.0
-```
-
-Because Controller is infinitely retrying to schedule each defined task, once permission issues will be resolved,
-task should appear in task listing and Cluster status.
diff --git a/docs/source/migration.md b/docs/source/migration.md
deleted file mode 100644
index 6b450637a22..00000000000
--- a/docs/source/migration.md
+++ /dev/null
@@ -1,146 +0,0 @@
-# Version migrations
-
-
-## `v0.3.0` -> `v1.0.0` migration
-
-`v0.3.0` used a very common name as a CRD kind (`Cluster`). In `v1.0.0` this issue was solved by using less common kind
-which is easier to disambiguate (`ScyllaCluster`).
-***This change is backward incompatible, which means manual migration is needed.***
-
-This procedure involves having two CRDs registered at the same time. We will detach Scylla Pods
-from the Scylla Operator for a short period to ensure that nothing is garbage collected when the Scylla Operator is upgraded.
-Compared to the [upgrade guide](upgrade.md), where a full deletion is requested, this procedure shouldn't cause downtime.
-However, detaching resources from their controller is considered hacky, so you shouldn't run this procedure
-out of the box on production. Make sure it works well multiple times in your staging environment first.
-
-***Read the whole procedure and make sure you understand what is going on before executing any of the commands!***
-
-In case of any issues or questions regarding this procedure, you're welcome to reach out on our [Scylla Users Slack](http://slack.scylladb.com/)
-in the #kubernetes channel.
-
-## Procedure
-
-1. Execute this whole procedure for each cluster sequentially. To get a list of existing clusters, execute the following:
- ```
- kubectl -n scylla get cluster.scylla.scylladb.com
-
- NAME AGE
- simple-cluster 30m
- ```
- All the commands below use the `scylla` namespace and `simple-cluster` as the cluster name.
-1. Make sure you're using the v1.0.0 tag:
- ```
- git checkout v1.0.0
- ```
-1. Upgrade your `cert-manager` to `v1.0.0`. If you installed it from a static file from this repo, simply execute the following:
- ```
- kubectl apply -f examples/common/cert-manager.yaml
- ```
- If your `cert-manager` was installed in another way, follow the official instructions on the `cert-manager` website.
-1. The `deploy/operator.yaml` file contains multiple resources. Extract **only** the `CustomResourceDefinition` into a separate file.
-1. Install the v1.0.0 CRD definition from the file created in the previous step:
- ```
- kubectl apply -f examples/common/crd.yaml
- ```
-1. Save your existing `simple-cluster` Cluster definition to a file:
- ```
- kubectl -n scylla get cluster.scylla.scylladb.com simple-cluster -o yaml > existing-cluster.yaml
- ```
-1. Migrate `kind` and `apiVersion` to the new values using:
- ```
- sed -i 's/scylla.scylladb.com\/v1alpha1/scylla.scylladb.com\/v1/g' existing-cluster.yaml
- sed -i 's/kind: Cluster/kind: ScyllaCluster/g' existing-cluster.yaml
- ```
-1. Install the migrated CRD instance:
- ```
- kubectl apply -f existing-cluster.yaml
- ```
- At this point, there should be two custom resources describing your Scylla cluster (one per CRD), although the new one is not yet controlled by the Operator.
-1. Get the UUID of the newly created ScyllaCluster resource:
- ```
- kubectl -n scylla get ScyllaCluster simple-cluster --template="{{ .metadata.uid }}"
-
- 12a3678d-8511-4c9c-8a48-fa78d3992694
- ```
- Save the output UUID somewhere; it will be referred to as `<new-uuid>` in the commands below.
-
- ***Depending on your shell, you might get an additional '%' sign at the end of the UUID, make sure to remove it!***
-
-1. Upgrade the ClusterRole attached to each of the Scylla nodes to grant them permission to look up Scylla clusters:
- ```
- kubectl patch ClusterRole simple-cluster-member --type "json" -p '[{"op":"add","path":"/rules/-","value":{"apiGroups":["scylla.scylladb.com"],"resources":["scyllaclusters"],"verbs":["get"]}}]'
- ```
- Amend the role name according to your cluster name; it should look like `<cluster-name>-member`.
-1. Get a list of all Services associated with your cluster:
- ```
- kubectl -n scylla get svc -l "scylla/cluster=simple-cluster"
-
- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
- simple-cluster-client ClusterIP None 9180/TCP 109m
- simple-cluster-us-east-1-us-east-1a-0 ClusterIP 10.43.23.96 7000/TCP,7001/TCP,7199/TCP,10001/TCP,9042/TCP,9142/TCP,9160/TCP 109m
- simple-cluster-us-east-1-us-east-1a-1 ClusterIP 10.43.66.22 7000/TCP,7001/TCP,7199/TCP,10001/TCP,9042/TCP,9142/TCP,9160/TCP 108m
- simple-cluster-us-east-1-us-east-1a-2 ClusterIP 10.43.246.25 7000/TCP,7001/TCP,7199/TCP,10001/TCP,9042/TCP,9142/TCP,9160/TCP 106m
-
- ```
-1. For each Service, change its `ownerReference` to point to the new CRD instance:
- ```
- kubectl -n scylla patch svc <svc-name> --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences/0/apiVersion", "value":"scylla.scylladb.com/v1"}, {"op": "replace", "path": "/metadata/ownerReferences/0/kind", "value":"ScyllaCluster"}, {"op": "replace", "path": "/metadata/ownerReferences/0/uid", "value":"<new-uuid>"}]'
- ```
- Replace `<svc-name>` with the Service name and `<new-uuid>` with the UUID saved in one of the previous steps; a consolidated sketch that loops over all Services and StatefulSets is shown after this procedure.
-1. Get the list of all Services again to verify that none were deleted. Also check the "AGE" column; it shouldn't be lower than in the previous result.
- ```
- kubectl -n scylla get svc -l "scylla/cluster=simple-cluster"
-
- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
- simple-cluster-client ClusterIP None 9180/TCP 110m
- simple-cluster-us-east-1-us-east-1a-0 ClusterIP 10.43.23.96 7000/TCP,7001/TCP,7199/TCP,10001/TCP,9042/TCP,9142/TCP,9160/TCP 110m
- simple-cluster-us-east-1-us-east-1a-1 ClusterIP 10.43.66.22 7000/TCP,7001/TCP,7199/TCP,10001/TCP,9042/TCP,9142/TCP,9160/TCP 109m
- simple-cluster-us-east-1-us-east-1a-2 ClusterIP 10.43.246.25 7000/TCP,7001/TCP,7199/TCP,10001/TCP,9042/TCP,9142/TCP,9160/TCP 107m
-
- ```
-1. Get a list of StatefulSets associated with your cluster:
- ```
- kubectl -n scylla get sts -l "scylla/cluster=simple-cluster"
-
- NAME READY AGE
- simple-cluster-us-east-1-us-east-1a 3/3 104m
- ```
-1. For each StatefulSet from the previous step, change its `ownerReference` to point to the new CRD instance.
-
- ```
- kubectl -n scylla patch sts <sts-name> --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences/0/apiVersion", "value":"scylla.scylladb.com/v1"}, {"op": "replace", "path": "/metadata/ownerReferences/0/kind", "value":"ScyllaCluster"}, {"op": "replace", "path": "/metadata/ownerReferences/0/uid", "value":"<new-uuid>"}]'
- ```
- Replace `<sts-name>` with the StatefulSet name and `<new-uuid>` with the UUID saved in one of the previous steps.
-
-1. Now that all Kubernetes resources bound to Scylla are attached to the new CRD, we can remove the 0.3.0 Operator and the old CRD definition.
- Check out the `v0.3.0` version, and remove the Scylla Operator and the old CRD:
- ```
- git checkout v0.3.0
- kubectl delete -f examples/generic/operator.yaml
- ```
-1. Check out `v1.0.0` and install the upgraded Scylla Operator:
- ```
- git checkout v1.0.0
- kubectl apply -f deploy/operator.yaml
- ```
-1. Wait until Scylla Operator boots up:
- ```
- kubectl -n scylla-operator-system wait --for=condition=ready pod --all --timeout=600s
- ```
-1. Get a list of StatefulSets associated with your cluster:
- ```
- kubectl -n scylla get sts -l "scylla/cluster=simple-cluster"
-
- NAME READY AGE
- simple-cluster-us-east-1-us-east-1a 3/3 104m
- ```
-1. For each StatefulSet from the previous step, change its sidecar container image to `v1.0.0` and wait until the change is propagated. This step initiates a rolling restart of the Pods, one by one.
- ```
- kubectl -n scylla patch sts <sts-name> --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/initContainers/0/image", "value":"scylladb/scylla-operator:v1.0.0"}]'
- kubectl -n scylla rollout status sts <sts-name>
- ```
- Replace `<sts-name>` with the StatefulSet name.
-1. If you're using Scylla Manager, bump the Scylla Manager Controller image to `v1.0.0`:
- ```
- kubectl -n scylla-manager-system patch sts scylla-manager-controller --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"scylladb/scylla-operator:v1.0.0"}]'
- ```
-1. Your Scylla cluster is now migrated to `v1.0.0`.
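-
-A hypothetical consolidation of the ownership-patching steps above: it captures the UUID into a shell variable and applies the same JSON patch to every Service and StatefulSet of the cluster. Adjust the namespace and cluster name to your environment.
-```
-# Capture the UUID of the new ScyllaCluster object.
-NEW_UID="$(kubectl -n scylla get ScyllaCluster simple-cluster --template='{{ .metadata.uid }}')"
-# The same JSON patch used in the manual steps, with the UUID substituted in.
-PATCH="[{\"op\": \"replace\", \"path\": \"/metadata/ownerReferences/0/apiVersion\", \"value\":\"scylla.scylladb.com/v1\"}, {\"op\": \"replace\", \"path\": \"/metadata/ownerReferences/0/kind\", \"value\":\"ScyllaCluster\"}, {\"op\": \"replace\", \"path\": \"/metadata/ownerReferences/0/uid\", \"value\":\"${NEW_UID}\"}]"
-# Re-own every Service and StatefulSet that belongs to the cluster.
-for obj in $(kubectl -n scylla get svc,sts -l "scylla/cluster=simple-cluster" -o name); do
-  kubectl -n scylla patch "${obj}" --type='json' -p="${PATCH}"
-done
-```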
diff --git a/docs/source/multidc/index.rst b/docs/source/multidc/index.rst
deleted file mode 100644
index a2f1eae7709..00000000000
--- a/docs/source/multidc/index.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-==========================================================
-Deploying multi-datacenter ScyllaDB clusters in Kubernetes
-==========================================================
-
-Prepare a platform for a multi-datacenter ScyllaDB cluster deployment:
-
-.. toctree::
- :maxdepth: 1
-
- eks
- gke
-
-Deploy a multi-datacenter ScyllaDB cluster in Kubernetes:
-
-.. toctree::
- :maxdepth: 1
-
- multidc
diff --git a/docs/source/index.rst b/docs/source/old-index.rst.old
similarity index 100%
rename from docs/source/index.rst
rename to docs/source/old-index.rst.old
diff --git a/docs/source/performance.md b/docs/source/performance.md
deleted file mode 100644
index ff48cfb582d..00000000000
--- a/docs/source/performance.md
+++ /dev/null
@@ -1,100 +0,0 @@
-# Performance tuning
-
-Scylla Operator 1.6 introduces a new experimental feature allowing users to optimize Kubernetes nodes.
-
-:::{warning}
-We recommend that you first try out the performance tuning on a pre-production instance.
-Given the nature of the underlying tuning script, undoing the changes requires rebooting the Kubernetes node(s).
-:::
-
-## Node tuning
-
-Starting from Operator 1.6, a new CRD called NodeConfig is available, allowing users to target Nodes that should be tuned.
-When a Node is supposed to be optimized, the Scylla Operator creates a DaemonSet covering those Nodes.
-Nodes matching the provided placement conditions will be subject to tuning.
-
-The example NodeConfig below tunes nodes that have the `scylla.scylladb.com/node-type=scylla` label:
-```
-apiVersion: scylla.scylladb.com/v1alpha1
-kind: NodeConfig
-metadata:
- name: cluster
-spec:
- placement:
- nodeSelector:
- scylla.scylladb.com/node-type: scylla
-```
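-Nodes only match this NodeConfig once they carry that label; a minimal sketch of labeling a node (the node name is a placeholder):
-```
-kubectl label node <node-name> scylla.scylladb.com/node-type=scylla
-```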
-For more details about the new CRD, use:
-```
-kubectl explain nodeconfigs.scylla.scylladb.com/v1alpha1
-```
-
-For all optimizations we use a Python script called perftune, which is available in the Scylla image.
-Perftune executes performance optimizations such as tuning the kernel, network, and disk devices, spreading IRQs across CPUs, and more.
-
-Tuning consists of two separate optimizations: common node tuning, and tuning based on Scylla Pods and their resource assignment.
-Node tuning is executed immediately. Pod tuning is executed when a Scylla Pod lands on that Node.
-
-Scylla works most efficiently when it's pinned to CPUs and not interrupted.
-One of the most common causes of context switching is network interrupts. Packets coming to a node need to be processed,
-and that requires CPU shares.
-
-On Kubernetes there are always at least a couple of processes running on the node: kubelet, Kubernetes provider applications, daemons, etc.
-These processes require CPU shares, so we cannot dedicate the entire node's processing power to Scylla; we need to leave room for the others.
-We take advantage of this and pin IRQs to the CPUs that are not used exclusively by any Scylla Pod.
-
-Tuning resources are created in a special namespace called `scylla-operator-node-tuning`.
-
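-If you want to inspect what was created there, a quick sketch (assuming the default namespace name above):
-```
-kubectl -n scylla-operator-node-tuning get daemonsets,pods
-```
-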
-The tuning is applied only to pods with the `Guaranteed` QoS class. Please double-check your ScyllaCluster resource specification
-to see if it meets all the conditions.
-
-## Kubernetes tuning
-
-By default, the kubelet uses the CFS quota to enforce pod CPU limits.
-When the node runs many CPU-bound pods, the workload can move between different CPU cores depending on whether the pod
-is throttled and which CPU cores are available.
-However, the kubelet may be configured to assign CPUs exclusively by setting the CPU manager policy to `static`.
-
-Setting up the kubelet configuration is provider-specific. Please check the docs for your distribution or talk to your
-provider.
-
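-For reference, a minimal sketch of the relevant settings, assuming your distribution lets you pass a `KubeletConfiguration` file; the reserved value is a placeholder:
-```
-apiVersion: kubelet.config.k8s.io/v1beta1
-kind: KubeletConfiguration
-# Assign whole CPUs exclusively to Guaranteed pods with integer CPU requests.
-cpuManagerPolicy: static
-# The static policy requires some CPU to be reserved for system daemons.
-systemReserved:
-  cpu: "1"
-```
-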
-Only pods within the [Guaranteed QoS class](https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/#create-a-pod-that-gets-assigned-a-qos-class-of-guaranteed) can take advantage of this option.
-When such a pod lands on a Node, the kubelet pins it to specific CPUs, and those CPUs won't be part of the shared pool.
-
-In our case there are two requirements each ScyllaCluster must fulfill to receive the Guaranteed QoS class:
-* resource requests and limits must be equal, or only limits have to be provided
-* agentResources must be provided, and their requests and limits must be equal, or only limits have to be provided
-
-An example of such a ScyllaCluster that receives a Guaranteed QoS class is below:
-
-```
-apiVersion: scylla.scylladb.com/v1
-kind: ScyllaCluster
-metadata:
- name: guaranteed-cluster
- namespace: scylla
-spec:
- agentVersion: 3.4.0
- version: 6.2.0
- datacenter:
- name: us-east-1
- racks:
- - name: us-east-1a
- members: 3
- storage:
- capacity: 500Gi
- agentResources:
- requests:
- cpu: 1
- memory: 1G
- limits:
- cpu: 1
- memory: 1G
- resources:
- requests:
- cpu: 4
- memory: 16G
- limits:
- cpu: 4
- memory: 16G
-```
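-
-Once such a cluster is up, you can verify that its Pods actually received the `Guaranteed` QoS class (the Pod name is a placeholder):
-```
-kubectl -n scylla get pod <pod-name> -o jsonpath='{.status.qosClass}'
-```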
diff --git a/docs/source/quickstarts/eks.md b/docs/source/quickstarts/eks.md
new file mode 100644
index 00000000000..b24de89edaf
--- /dev/null
+++ b/docs/source/quickstarts/eks.md
@@ -0,0 +1,134 @@
+# Deploying ScyllaDB on EKS
+
+This is a quickstart guide to help you set up a basic EKS cluster with local NVMe disks and solid performance.
+
+This is by no means a complete guide, and you should always consult your provider's documentation.
+
+## Prerequisites
+
+In this guide we'll be using `eksctl` to set up the cluster, and you'll need `kubectl` to talk to it.
+
+If you don't have them already, or they are not available through your package manager, you can follow these links to learn more about installing them:
+- [eksctl](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html)
+- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+
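+Once installed, you can quickly check that both tools are available on your `PATH`:
+```
+eksctl version
+kubectl version --client
+```
+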
+## Creating an EKS cluster
+
+First, let's create a declarative config to use with eksctl:
+:::{code} bash
+:linenos:
+
+cat > clusterconfig.eksctl.yaml <<EOF
+
+Get the credentials for your new cluster
+```
+gcloud container clusters get-credentials "${CLUSTER_NAME}" --zone="${GCP_ZONE}"
+```
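+
+A quick way to confirm that `kubectl` now points at the new cluster:
+```
+kubectl get nodes
+```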
+
+Create a ClusterRoleBinding for your user (by default, GKE doesn't give you the necessary RBAC permissions).
+In order for this to work you need to have at least permission `container.clusterRoleBindings.create`.
+The easiest way to obtain this permission is to enable the `Kubernetes Engine Admin` role for your user in the GCP IAM web interface.
+```
+kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user "${GCP_USER}"
+```
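+
+You can verify that the binding took effect; this should print `yes`:
+```
+kubectl auth can-i create clusterrolebindings
+```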
+
+
+## Setting up storage and tuning
+
+:::{code-block} bash
+:linenos:
+
+kubectl apply --server-side -f=<